after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index.
Parameters
----------
other : Series, or object coercible into Series
"""
if not isinstance(other, Series):
other = Series(other)
query_compiler = self._query_compiler.series_update(other._query_compiler)
self._update_inplace(new_query_compiler=query_compiler)
|
def update(self, other):
return self._default_to_pandas(pandas.Series.update, other)
|
https://github.com/modin-project/modin/issues/1557
|
import modin.pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new_df = pd.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]})
df.update(new_df)
df
IndexError Traceback (most recent call last)
python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj)
392 if cls is not object \
393 and callable(cls.__dict__.get('__repr__')):
--> 394 return _repr_pprint(obj, self, cycle)
395
396 return _default_pprint(obj, self, cycle)
python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
682 """A pprint that just redirects to the normal repr function."""
683 # Find newlines and replace them with p.break_()
--> 684 output = repr(obj)
685 lines = output.splitlines()
686 with p.group():
modin/modin/pandas/dataframe.py in __repr__(self)
151
152 num_cols += len(self.columns) - i
--> 153 result = repr(self._build_repr_df(num_rows, num_cols))
154 if len(self.index) > num_rows or len(self.columns) > num_cols:
155 # The split here is so that we don't repr pandas row lengths.
modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
98 else:
99 indexer = row_indexer
--> 100 return self.iloc[indexer]._query_compiler.to_pandas()
101
102 def _update_inplace(self, new_query_compiler):
modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
173
174 def to_pandas(self):
--> 175 return self._modin_frame.to_pandas()
176
177 @classmethod
modin/modin/engines/base/frame/data.py in to_pandas(self)
1291 Pandas DataFrame.
1292 """
-> 1293 df = self._frame_mgr_cls.to_pandas(self._partitions)
1294 if df.empty:
1295 if len(self.columns) != 0:
modin/modin/engines/base/frame/partition_manager.py in to_pandas(cls, partitions)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/base/frame/partition_manager.py in <listcomp>(.0)
256 A Pandas DataFrame
257 """
--> 258 retrieved_objects = [[obj.to_pandas() for obj in part] for part in partitions]
259 if all(
260 isinstance(part, pandas.Series) for row in retrieved_objects for part in row
modin/modin/engines/python/pandas_on_python/frame/partition.py in to_pandas(self)
113 A Pandas DataFrame.
114 """
--> 115 dataframe = self.get()
116 assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series
117
modin/modin/engines/python/pandas_on_python/frame/partition.py in get(self)
46 The object that was `put`.
47 """
---> 48 self.drain_call_queue()
49 return self.data.copy()
50
modin/modin/engines/python/pandas_on_python/frame/partition.py in drain_call_queue(self)
86 if len(self.call_queue) == 0:
87 return
---> 88 self.apply(lambda x: x)
89
90 def mask(self, row_indices=None, col_indices=None):
modin/modin/engines/python/pandas_on_python/frame/partition.py in apply(self, func, **kwargs)
74 return result
75
---> 76 self.data = call_queue_closure(self.data, self.call_queue)
77 self.call_queue = []
78 return PandasOnPythonFramePartition(func(self.data.copy(), **kwargs))
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
71 except Exception as e:
72 self.call_queue = []
---> 73 raise e
74 return result
75
modin/modin/engines/python/pandas_on_python/frame/partition.py in call_queue_closure(data, call_queues)
68 for func, kwargs in call_queues:
69 try:
---> 70 result = func(result, **kwargs)
71 except Exception as e:
72 self.call_queue = []
modin/modin/engines/python/pandas_on_python/frame/partition.py in <lambda>(df)
90 def mask(self, row_indices=None, col_indices=None):
91 new_obj = self.add_to_apply_calls(
---> 92 lambda df: pandas.DataFrame(df.iloc[row_indices, col_indices])
93 )
94 new_obj._length_cache = (
python3.7/site-packages/pandas/core/indexing.py in __getitem__(self, key)
1760 except (KeyError, IndexError, AttributeError):
1761 pass
-> 1762 return self._getitem_tuple(key)
1763 else:
1764 # we by definition only have the 0th axis
python3.7/site-packages/pandas/core/indexing.py in _getitem_tuple(self, tup)
2065 def _getitem_tuple(self, tup: Tuple):
2066
-> 2067 self._has_valid_tuple(tup)
2068 try:
2069 return self._getitem_lowerdim(tup)
python3.7/site-packages/pandas/core/indexing.py in _has_valid_tuple(self, key)
701 raise IndexingError("Too many indexers")
702 try:
--> 703 self._validate_key(k, i)
704 except ValueError:
705 raise ValueError(
python3.7/site-packages/pandas/core/indexing.py in _validate_key(self, key, axis)
2007 # check that the key does not exceed the maximum size of the index
2008 if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
-> 2009 raise IndexError("positional indexers are out-of-bounds")
2010 else:
2011 raise ValueError(f"Can only index by location with a [{self._valid_types}]")
IndexError: positional indexers are out-of-bounds
|
IndexError
|
def groupby_reduce(
self,
by,
axis,
groupby_args,
map_func,
map_args,
reduce_func=None,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Apply a Groupby via MapReduce pattern.
Note: Result length will be the number of unique values in `by`.
Currently, here is how this is implemented:
- map phase:
During the map phase we set `as_index` to True to force the `by` into the
index for the next phase. We always do this so that the reduce phase has
complete access of the `by` data without having to shuffle it twice. The map
function is applied with the arguments provided. The `index` of the
partitions will become the new `by` column. Sometimes, the name of `by` is
the same as a data column. In these cases we add "_modin_groupby_" to the
name of the index. This does not happen when grouping by multiple columns
because those columns have already been dropped as a requirement.
- reduce phase:
During the reduce phase, the `by` data is moved from the `index` into the
data. The names of those inserted become the `by` for the reduce phase of
the groupby. Once applied, we drop the columns in all partitions after the
first so we do not insert the data multiple times. We also avoid inserting
data when the data from the `by` parameter did not come from this object.
The columns can be derived externally but the new index must be computed
post hoc.
Args:
by: The query compiler object to groupby.
axis: The axis to groupby. Must be 0 currently.
groupby_args: The arguments for the groupby component.
map_func: The function to perform during the map phase.
map_args: The arguments for the `map_func`.
reduce_func: The function to perform during the reduce phase.
reduce_args: The arguments for `reduce_func`.
numeric_only: Whether to drop non-numeric columns.
drop: Whether the data in `by` was dropped.
Returns:
A new Query Compiler
"""
assert isinstance(by, type(self)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = self.getitem_column_array(self._modin_frame._numeric_columns(True))
else:
qc = self
as_index = groupby_args.get("as_index", True)
# For simplicity we allow only one function to be passed in if both are the
# same.
if reduce_func is None:
reduce_func = map_func
reduce_args = map_args
def _map(df, other):
def compute_map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(
df.groupby(by=other, axis=axis, **groupby_args), **map_args
)
# The _modin_groupby_ prefix indicates that this is the first partition,
# and since we may need to insert the grouping data in the reduce phase
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
try:
return compute_map(df, other)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_map(df.copy(), other.copy())
def _reduce(df):
def compute_reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
if isinstance(by_part, str) and by_part in result.columns:
if "_modin_groupby_" in by_part and drop:
col_name = by_part[len("_modin_groupby_") :]
new_result = result.drop(columns=col_name)
new_result.columns = [
col_name if "_modin_groupby_" in c else c
for c in new_result.columns
]
return new_result
else:
return result.drop(columns=by_part)
return result
try:
return compute_reduce(df)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_reduce(df.copy())
if axis == 0:
new_columns = qc.columns
new_index = None
else:
new_index = self.index
new_columns = None
new_modin_frame = qc._modin_frame.groupby_reduce(
axis,
by._modin_frame,
_map,
_reduce,
new_columns=new_columns,
new_index=new_index,
)
return self.__constructor__(new_modin_frame)
|
def groupby_reduce(
self,
by,
axis,
groupby_args,
map_func,
map_args,
reduce_func=None,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Apply a Groupby via MapReduce pattern.
Note: Result length will be the number of unique values in `by`.
Currently, here is how this is implemented:
- map phase:
During the map phase we set `as_index` to True to force the `by` into the
index for the next phase. We always do this so that the reduce phase has
complete access of the `by` data without having to shuffle it twice. The map
function is applied with the arguments provided. The `index` of the
partitions will become the new `by` column. Sometimes, the name of `by` is
the same as a data column. In these cases we add "_modin_groupby_" to the
name of the index. This does not happen when grouping by multiple columns
because those columns have already been dropped as a requirement.
- reduce phase:
During the reduce phase, the `by` data is moved from the `index` into the
data. The names of those inserted become the `by` for the reduce phase of
the groupby. Once applied, we drop the columns in all partitions after the
first so we do not insert the data multiple times. We also avoid inserting
data when the data from the `by` parameter did not come from this object.
The columns can be derived externally but the new index must be computed
post hoc.
Args:
by: The query compiler object to groupby.
axis: The axis to groupby. Must be 0 currently.
groupby_args: The arguments for the groupby component.
map_func: The function to perform during the map phase.
map_args: The arguments for the `map_func`.
reduce_func: The function to perform during the reduce phase.
reduce_args: The arguments for `reduce_func`.
numeric_only: Whether to drop non-numeric columns.
drop: Whether the data in `by` was dropped.
Returns:
A new Query Compiler
"""
assert isinstance(by, type(self)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = self.getitem_column_array(self._modin_frame._numeric_columns(True))
else:
qc = self
first_column = qc.columns[0]
as_index = groupby_args.get("as_index", True)
# When drop is False and as_index is False, we do not want to insert the `by`
# data as a new column in the dataframe. We will drop it.
drop_by = not drop
# For simplicity we allow only one function to be passed in if both are the
# same.
if reduce_func is None:
reduce_func = map_func
reduce_args = map_args
def _map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
def _reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
# Avoid inserting data after the first partition or if the data did not come
# from this query compiler.
if not as_index and (first_column not in df.columns or drop_by):
return result.drop(columns=by_part)
return result
if axis == 0:
if not as_index and drop:
new_columns = by.columns.append(qc.columns)
else:
new_columns = qc.columns
new_index = None
else:
new_index = self.index
new_columns = None
new_modin_frame = qc._modin_frame.groupby_reduce(
axis,
by._modin_frame,
_map,
_reduce,
new_columns=new_columns,
new_index=new_index,
)
return self.__constructor__(new_modin_frame)
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def _map(df, other):
def compute_map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
# The _modin_groupby_ prefix indicates that this is the first partition,
# and since we may need to insert the grouping data in the reduce phase
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
try:
return compute_map(df, other)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_map(df.copy(), other.copy())
|
def _map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat([df] + [other[[o for o in other if o not in df]]], axis=1)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def _reduce(df):
def compute_reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
if isinstance(by_part, str) and by_part in result.columns:
if "_modin_groupby_" in by_part and drop:
col_name = by_part[len("_modin_groupby_") :]
new_result = result.drop(columns=col_name)
new_result.columns = [
col_name if "_modin_groupby_" in c else c
for c in new_result.columns
]
return new_result
else:
return result.drop(columns=by_part)
return result
try:
return compute_reduce(df)
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except ValueError:
return compute_reduce(df.copy())
|
def _reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
# Avoid inserting data after the first partition or if the data did not come
# from this query compiler.
if not as_index and (first_column not in df.columns or drop_by):
return result.drop(columns=by_part)
return result
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def apply(self, func, *args, **kwargs):
return self._apply_agg_function(
# Grouping column in never dropped in groupby.apply, so drop=False
lambda df: df.apply(func, *args, **kwargs),
drop=False,
)
|
def apply(self, func, *args, **kwargs):
return self._apply_agg_function(
lambda df: df.apply(func, *args, **kwargs), drop=self._as_index
)
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def __getitem__(self, key):
kwargs = self._kwargs.copy()
# Most of time indexing DataFrameGroupBy results in another DataFrameGroupBy object unless circumstances are
# special in which case SeriesGroupBy has to be returned. Such circumstances are when key equals to a single
# column name and is not a list of column names or list of one column name.
make_dataframe = True
if self._drop and self._as_index:
if not isinstance(key, list):
key = [key]
kwargs["squeeze"] = True
make_dataframe = False
# When `as_index` is False, pandas will always convert to a `DataFrame`, we
# convert to a list here so that the result will be a `DataFrame`.
elif not self._as_index and not isinstance(key, list):
# Sometimes `__getitem__` doesn't only get the item, it also gets the `by`
# column. This logic is here to ensure that we also get the `by` data so
# that it is there for `as_index=False`.
if (
isinstance(self._by, type(self._query_compiler))
and all(c in self._columns for c in self._by.columns)
and self._drop
):
key = [key] + list(self._by.columns)
else:
key = [key]
if isinstance(key, list) and (make_dataframe or not self._as_index):
return DataFrameGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=self._drop,
**kwargs,
)
return SeriesGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
|
def __getitem__(self, key):
kwargs = self._kwargs.copy()
# Most of time indexing DataFrameGroupBy results in another DataFrameGroupBy object unless circumstances are
# special in which case SeriesGroupBy has to be returned. Such circumstances are when key equals to a single
# column name and is not a list of column names or list of one column name.
make_dataframe = True
if self._drop:
if not isinstance(key, list):
key = [key]
kwargs["squeeze"] = True
make_dataframe = False
# When `as_index` is False, pandas will always convert to a `DataFrame`, we
# convert to a list here so that the result will be a `DataFrame`.
elif not self._as_index and not isinstance(key, list):
key = [key]
if isinstance(key, list) and (make_dataframe or not self._as_index):
return DataFrameGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=self._drop,
**kwargs,
)
return SeriesGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def size(self):
if self._axis == 0:
if self._as_index:
work_object = self[self._df.columns[0]]
else:
# Size always works in as_index=True mode so it is necessary to make a copy
# of _kwargs and change as_index in it
kwargs = self._kwargs.copy()
kwargs["as_index"] = True
kwargs["squeeze"] = True
work_object = SeriesGroupBy(
self._df[self._df.columns[0]],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
result = work_object._groupby_reduce(
lambda df: pandas.DataFrame(df.size()),
lambda df: df.sum(),
numeric_only=False,
)
series_result = Series(query_compiler=result._query_compiler)
# Pandas does not name size() output
series_result.name = None
return series_result
else:
return DataFrameGroupBy(
self._df.T,
self._by,
0,
drop=self._drop,
idx_name=self._idx_name,
**self._kwargs,
).size()
|
def size(self):
return pandas.Series({k: len(v) for k, v in self._index_grouped.items()})
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def _groupby_reduce(
self, map_func, reduce_func, drop=True, numeric_only=True, **kwargs
):
if self._is_multi_by and not isinstance(self._by, type(self._query_compiler)):
return self._default_to_pandas(map_func, **kwargs)
if not isinstance(self._by, type(self._query_compiler)):
return self._apply_agg_function(map_func, drop=drop, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if drop and self._drop:
if self._as_index:
groupby_qc = self._query_compiler.drop(columns=self._by.columns)
else:
groupby_qc = self._query_compiler
else:
groupby_qc = self._query_compiler
result = type(self._df)(
query_compiler=groupby_qc.groupby_reduce(
self._by,
self._axis,
self._kwargs,
map_func,
kwargs,
reduce_func=reduce_func,
reduce_args=kwargs,
numeric_only=numeric_only,
drop=self._drop,
)
)
if self._kwargs.get("squeeze", False):
return result.squeeze()
return result
|
def _groupby_reduce(
self, map_func, reduce_func, drop=True, numeric_only=True, **kwargs
):
if self._is_multi_by and not isinstance(self._by, type(self._query_compiler)):
return self._default_to_pandas(map_func, **kwargs)
if not isinstance(self._by, type(self._query_compiler)):
return self._apply_agg_function(map_func, drop=drop, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
result = type(self._df)(
query_compiler=groupby_qc.groupby_reduce(
self._by,
self._axis,
self._kwargs,
map_func,
kwargs,
reduce_func=reduce_func,
reduce_args=kwargs,
numeric_only=numeric_only,
drop=self._drop,
)
)
if self._kwargs.get("squeeze", False):
return result.squeeze()
return result
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def _iter(self):
group_ids = self._index_grouped.keys()
if self._axis == 0:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_row_array(
self._index.get_indexer_for(self._index_grouped[k].unique())
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
else:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_column_array(
self._index_grouped[k].unique()
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
|
def _iter(self):
from .dataframe import Series
group_ids = self._index_grouped.keys()
if self._axis == 0:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_row_array(
self._index.get_indexer_for(self._index_grouped[k].unique())
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
else:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_column_array(
self._index_grouped[k].unique()
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
|
https://github.com/modin-project/modin/issues/1563
|
Traceback (most recent call last):
File "groupby_test16.py", line 29, in <module>
df1 = gb.prod()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 333, in prod
return self._groupby_reduce(callable, None)
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 582, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1298, in groupby_reduce
new_modin_frame = qc._modin_frame.groupby_reduce(
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1259, in groupby_reduce
new_index = self._frame_mgr_cls.get_indices(
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 87, in get_indices
new_idx = ray.get(new_idx)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::modin.engines.ray.pandas_on_ray.frame.partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.axis_partition.deploy_ray_func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 427, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
ray.exceptions.RayTaskError: ray::modin.engines.ray.pandas_on_ray.frame.partition_manager.func() (pid=2588931, ip=10.241.129.19)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "/localdisk/gashiman/modin/modin/engines/ray/pandas_on_ray/frame/partition_manager.py", line 44, in func
return apply_func(df, other)
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1248, in _map
result = map_func(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 332, in callable
return df.prod(**kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 1371, in f
return self._cython_agg_general(alias, alt=npfunc, **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 993, in _cython_agg_general
agg_blocks, agg_items = self._cython_agg_blocks(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 1022, in _cython_agg_blocks
result, _ = self.grouper.aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 586, in aggregate
return self._cython_operation(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 529, in _cython_operation
result = self._aggregate(
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/groupby/ops.py", line 608, in _aggregate
agg_func(result, counts, values, comp_ids, min_count)
File "pandas/_libs/groupby.pyx", line 511, in pandas._libs.groupby._group_prod
File "stringsource", line 658, in View.MemoryView.memoryview_cwrapper
File "stringsource", line 349, in View.MemoryView.memoryview.__cinit__
ValueError: buffer source array is read-only
|
ray.exceptions.RayTaskError
|
def _dt_prop_map(property_name):
"""
Create a function that call property of property `dt` of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze().dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
|
def _dt_prop_map(property_name):
"""
Create a function that call property of property `dt` of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze().dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
|
https://github.com/modin-project/modin/issues/1525
|
'Traceback (most recent call last):\n
File "/home/adrian.arroyo/.vscode-server/extensions/ms-python.python-2020.5.80290/pythonFiles/lib/python/debugpy/no_wheels/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py", line 193, in _get_py_dictionary\n
attr = getattr(var, name)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 3381, in __getattribute__\n
method = object.__getattribute__(self, item)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/series.py", line 349, in values\n
return super(Series, self).to_numpy().flatten()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 2981, in to_numpy\n
arr = self._query_compiler.to_numpy()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 182, in to_numpy\n
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/error_message.py", line 53, in catch_bugs_and_request_email\n
" caused this error.\\n{}".format(extra_log)\n
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.\n\n'
|
Exception
|
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze().dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
|
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze().dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
else:
return pandas.DataFrame([prop_val])
|
https://github.com/modin-project/modin/issues/1525
|
'Traceback (most recent call last):\n
File "/home/adrian.arroyo/.vscode-server/extensions/ms-python.python-2020.5.80290/pythonFiles/lib/python/debugpy/no_wheels/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py", line 193, in _get_py_dictionary\n
attr = getattr(var, name)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 3381, in __getattribute__\n
method = object.__getattribute__(self, item)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/series.py", line 349, in values\n
return super(Series, self).to_numpy().flatten()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 2981, in to_numpy\n
arr = self._query_compiler.to_numpy()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 182, in to_numpy\n
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/error_message.py", line 53, in catch_bugs_and_request_email\n
" caused this error.\\n{}".format(extra_log)\n
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.\n\n'
|
Exception
|
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
k: v.to_pandas if isinstance(v, type(self)) else v for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
|
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
k: v.to_pandas if isinstance(v, type(self)) else v for k, v in kwargs.items()
}
return self.from_pandas(
pandas_op(self.to_pandas(), *args, **kwargs), type(self._modin_frame)
)
|
https://github.com/modin-project/modin/issues/1525
|
'Traceback (most recent call last):\n
File "/home/adrian.arroyo/.vscode-server/extensions/ms-python.python-2020.5.80290/pythonFiles/lib/python/debugpy/no_wheels/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py", line 193, in _get_py_dictionary\n
attr = getattr(var, name)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 3381, in __getattribute__\n
method = object.__getattribute__(self, item)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/series.py", line 349, in values\n
return super(Series, self).to_numpy().flatten()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 2981, in to_numpy\n
arr = self._query_compiler.to_numpy()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 182, in to_numpy\n
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/error_message.py", line 53, in catch_bugs_and_request_email\n
" caused this error.\\n{}".format(extra_log)\n
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.\n\n'
|
Exception
|
def to_timestamp(self, *args, **kwargs):
return Series(query_compiler=self._query_compiler.dt_to_timestamp(*args, **kwargs))
|
def to_timestamp(self, freq=None, how="start", copy=True):
return self._default_to_pandas("to_timestamp", freq=freq, how=how, copy=copy)
|
https://github.com/modin-project/modin/issues/1525
|
'Traceback (most recent call last):\n
File "/home/adrian.arroyo/.vscode-server/extensions/ms-python.python-2020.5.80290/pythonFiles/lib/python/debugpy/no_wheels/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py", line 193, in _get_py_dictionary\n
attr = getattr(var, name)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 3381, in __getattribute__\n
method = object.__getattribute__(self, item)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/series.py", line 349, in values\n
return super(Series, self).to_numpy().flatten()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/pandas/base.py", line 2981, in to_numpy\n
arr = self._query_compiler.to_numpy()\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 182, in to_numpy\n
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)\n
File "/home/adrian.arroyo/.local/lib/python3.6/site-packages/modin/error_message.py", line 53, in catch_bugs_and_request_email\n
" caused this error.\\n{}".format(extra_log)\n
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.\n\n'
|
Exception
|
def __getitem__(self, key):
kwargs = self._kwargs.copy()
# Most of time indexing DataFrameGroupBy results in another DataFrameGroupBy object unless circumstances are
# special in which case SeriesGroupBy has to be returned. Such circumstances are when key equals to a single
# column name and is not a list of column names or list of one column name.
make_dataframe = True
if self._drop:
if not isinstance(key, list):
key = [key]
kwargs["squeeze"] = True
make_dataframe = False
# When `as_index` is False, pandas will always convert to a `DataFrame`, we
# convert to a list here so that the result will be a `DataFrame`.
elif not self._as_index and not isinstance(key, list):
key = [key]
if isinstance(key, list) and (make_dataframe or not self._as_index):
return DataFrameGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=self._drop,
**kwargs,
)
return SeriesGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
|
def __getitem__(self, key):
kwargs = self._kwargs.copy()
if self._drop:
if not isinstance(key, list):
key = [key]
kwargs["squeeze"] = True
# When `as_index` is False, pandas will always convert to a `DataFrame`, we
# convert to a list here so that the result will be a `DataFrame`.
elif not self._as_index and not isinstance(key, list):
key = [key]
if isinstance(key, list):
return DataFrameGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=self._drop,
**kwargs,
)
return SeriesGroupBy(
self._df[key],
self._by,
self._axis,
idx_name=self._idx_name,
drop=False,
**kwargs,
)
|
https://github.com/modin-project/modin/issues/1509
|
Traceback (most recent call last):
File "groupby_test.py", line 8, in <module>
df1 = df.groupby('max_speed')['max_speed'].count()
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 452, in count
return self._groupby_reduce(
File "/localdisk/gashiman/modin/modin/pandas/groupby.py", line 519, in _groupby_reduce
query_compiler=groupby_qc.groupby_reduce(
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 1169, in groupby_reduce
first_column = qc.columns[0]
File "/nfs/site/home/gashiman/.local/lib/python3.8/site-packages/pandas/core/indexes/base.py", line 3929, in __getitem__
return getitem(key)
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def __setitem__(self, key, value):
if key not in self.columns:
# Handle new column case first
if isinstance(value, Series):
if len(self.columns) == 0:
self._query_compiler = value._query_compiler.copy()
else:
self._create_or_update_from_compiler(
self._query_compiler.concat(1, value._query_compiler),
inplace=True,
)
# Now that the data is appended, we need to update the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# last column name from the list (the appended value's name and append
# the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
elif (
isinstance(value, np.ndarray)
and len(value.shape) > 1
and value.shape[1] != 1
):
raise ValueError(
"Wrong number of items passed %i, placement implies 1" % value.shape[1]
)
elif isinstance(value, (pandas.DataFrame, DataFrame)) and value.shape[1] != 1:
raise ValueError(
"Wrong number of items passed %i, placement implies 1" % value.shape[1]
)
else:
self.insert(loc=len(self.columns), column=key, value=value)
return
if not isinstance(key, str):
def setitem_without_string_columns(df):
# Arrow makes memory-mapped objects immutable, so copy will allow them
# to be mutable again.
df = df.copy(True)
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if isinstance(value, (pandas.DataFrame, DataFrame)):
value = value[value.columns[0]].values
elif isinstance(value, np.ndarray):
assert len(value.shape) < 3, (
"Shape of new values must be compatible with manager shape"
)
value = value.T.reshape(-1)
if len(self) > 0:
value = value[: len(self)]
if not isinstance(value, Series):
value = list(value)
if len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
if isinstance(value, Series):
value = value._query_compiler
self._update_inplace(self._query_compiler.setitem(0, key, value))
|
def __setitem__(self, key, value):
if not isinstance(key, str):
def setitem_without_string_columns(df):
# Arrow makes memory-mapped objects immutable, so copy will allow them
# to be mutable again.
df = df.copy(True)
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if isinstance(value, (pandas.DataFrame, DataFrame)):
if value.shape[1] != 1 and key not in self.columns:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
value = value[value.columns[0]].values
elif isinstance(value, np.ndarray):
if len(value.shape) > 1 and value.shape[1] != 1 and key not in self.columns:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
assert len(value.shape) < 3, (
"Shape of new values must be compatible with manager shape"
)
value = value.T.reshape(-1)
if len(self) > 0:
value = value[: len(self)]
if not isinstance(value, Series):
value = list(value)
if key not in self.columns:
if isinstance(value, Series):
if len(self.columns) == 0:
self._query_compiler = value._query_compiler.copy()
else:
self._create_or_update_from_compiler(
self._query_compiler.concat(1, value._query_compiler),
inplace=True,
)
# Now that the data is appended, we need to update the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# last column name from the list (the appended value's name and append
# the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
else:
self.insert(loc=len(self.columns), column=key, value=value)
elif len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
if isinstance(value, Series):
value = value._query_compiler
self._update_inplace(self._query_compiler.setitem(0, key, value))
|
https://github.com/modin-project/modin/issues/1490
|
Traceback (most recent call last):
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/frame.py", line 3054, in _ensure_valid_index
value = Series(value)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/series.py", line 256, in __init__
data, index = self._init_dict(data, index, dtype)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/series.py", line 334, in _init_dict
if data:
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 3343, in __nonzero__
self.__class__.__name__
ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test_setitem.py", line 5, in <module>
df[0] = se
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 2072, in __setitem__
self._default_to_pandas(setitem_without_string_columns)._query_compiler
File "/localdisk/gashiman/modin/modin/pandas/base.py", line 245, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 2068, in setitem_without_string_columns
df[key] = value
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/frame.py", line 2938, in __setitem__
self._set_item(key, value)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/frame.py", line 2999, in _set_item
self._ensure_valid_index(value)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/frame.py", line 3057, in _ensure_valid_index
"Cannot set a frame with no defined index "
ValueError: Cannot set a frame with no defined index and a value that cannot be converted to a Series
|
ValueError
|
def read(cls, filepath_or_buffer, **kwargs):
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
# TODO: replace this by reading lines from file.
if kwargs.get("nrows") is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0, index_col=None)
).columns
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols = kwargs.get("usecols", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
for _ in range(skiprows):
f.readline()
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
total_bytes = cls.file_size(f)
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
while f.tell() < total_bytes:
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
partition_id = cls.call_deploy(
f, chunk_size, num_splits + 2, args, quotechar=quotechar
)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
# pandas has a really weird edge case here.
if kwargs.get("names", None) is not None and skiprows > 1:
new_index = pandas.RangeIndex(skiprows - 1, new_index.stop + skiprows - 1)
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids)
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
def read(cls, filepath_or_buffer, **kwargs):
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
filepath_or_buffer = cls.get_path(filepath_or_buffer)
elif not pathlib_or_pypath(filepath_or_buffer):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
compression_type = cls.infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if compression_type is not None:
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# need python3.7 to .seek and .tell ZipExtFile
kwargs["compression"] = compression_type
else:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
return cls.single_worker_read(filepath_or_buffer, **kwargs)
# TODO: replace this by reading lines from file.
if kwargs.get("nrows") is not None:
return cls.single_worker_read(filepath_or_buffer, **kwargs)
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
names = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0, index_col=None)
).columns
empty_pd_df = pandas.read_csv(
filepath_or_buffer, **dict(kwargs, nrows=0, skipfooter=0)
)
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols = kwargs.get("usecols", None)
usecols_md = _validate_usecols_arg(usecols)
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
cls.file_open(filepath_or_buffer, "rb"),
**dict(kwargs, nrows=0, skipfooter=0),
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
encoding = kwargs.get("encoding", None)
quotechar = kwargs.get("quotechar", '"').encode(
encoding if encoding is not None else "UTF-8"
)
with cls.file_open(filepath_or_buffer, "rb", compression_type) as f:
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
if isinstance(skiprows, int) or skiprows is None:
if skiprows is None:
skiprows = 0
header = kwargs.get("header", "infer")
if header == "infer" and kwargs.get("names", None) is None:
skiprows += 1
elif isinstance(header, int):
skiprows += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
skiprows += max(header) + 1
for _ in range(skiprows):
f.readline()
if kwargs.get("encoding", None) is not None:
partition_kwargs["skiprows"] = 1
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
total_bytes = cls.file_size(f)
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
while f.tell() < total_bytes:
args = {
"fname": filepath_or_buffer,
"num_splits": num_splits,
**partition_kwargs,
}
partition_id = cls.call_deploy(
f, chunk_size, num_splits + 2, args, quotechar=quotechar
)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
# pandas has a really weird edge case here.
if kwargs.get("names", None) is not None and skiprows > 1:
new_index = pandas.RangeIndex(skiprows - 1, new_index.stop + skiprows - 1)
else:
index_objs = cls.materialize(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = cls.get_dtypes(dtypes_ids)
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# We have to recompute the column widths if `parse_dates` is set because
# we are not guaranteed to have the correct information regarding how many
# columns are on each partition.
column_widths = None
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
|
https://github.com/modin-project/modin/issues/1379
|
Traceback (most recent call last):
File "code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 2, in <module>
File "/modin/pandas/io.py", line 143, in read_json
return DataFrame(query_compiler=BaseFactory.read_json(**kwargs))
File "/modin/data_management/factories.py", line 60, in read_json
return cls._determine_engine()._read_json(**kwargs)
File "/modin/data_management/factories.py", line 64, in _read_json
return cls.io_cls.read_json(**kwargs)
File "/modin/engines/base/io/text/json_reader.py", line 13, in read
return cls.single_worker_read(path_or_buf, **kwargs)
File "/modin/backends/pandas/parsers.py", line 57, in single_worker_read
pandas_frame = cls.parse(fname, **kwargs)
File "/modin/backends/pandas/parsers.py", line 117, in parse
return pandas.read_json(fname, **kwargs)
File "/pandas/util/_decorators.py", line 186, in wrapper
return func(*args, **kwargs)
File "/pandas/io/json/_json.py", line 608, in read_json
result = json_reader.read()
File "/pandas/io/json/_json.py", line 731, in read
obj = self._get_object_parser(self.data)
File "/pandas/io/json/_json.py", line 753, in _get_object_parser
obj = FrameParser(json, **kwargs).parse()
File "/pandas/io/json/_json.py", line 857, in parse
self._parse_no_numpy()
File "/pandas/io/json/_json.py", line 1089, in _parse_no_numpy
loads(json, precise_float=self.precise_float), dtype=None
ValueError: Expected object or value
|
ValueError
|
def read(cls, path_or_buf, **kwargs):
if isinstance(path_or_buf, str):
if not cls.file_exists(path_or_buf):
return cls.single_worker_read(path_or_buf, **kwargs)
path_or_buf = cls.get_path(path_or_buf)
elif not cls.pathlib_or_pypath(path_or_buf):
return cls.single_worker_read(path_or_buf, **kwargs)
if not kwargs.get("lines", False):
return cls.single_worker_read(path_or_buf, **kwargs)
columns = pandas.read_json(
BytesIO(b"" + open(path_or_buf, "rb").readline()), lines=True
).columns
kwargs["columns"] = columns
empty_pd_df = pandas.DataFrame(columns=columns)
with cls.file_open(path_or_buf, "rb", kwargs.get("compression", "infer")) as f:
total_bytes = cls.file_size(f)
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
partition_ids = []
index_ids = []
dtypes_ids = []
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(columns):
column_widths = [len(columns)]
num_splits = 1
else:
column_widths = [
column_chunksize
if i != num_splits - 1
else len(columns) - (column_chunksize * (num_splits - 1))
for i in range(num_splits)
]
while f.tell() < total_bytes:
start = f.tell()
args = {"fname": path_or_buf, "num_splits": num_splits, "start": start}
args.update(kwargs)
partition_id = cls.call_deploy(f, chunk_size, num_splits + 3, args)
partition_ids.append(partition_id[:-3])
index_ids.append(partition_id[-3])
dtypes_ids.append(partition_id[-2])
# partition_id[-1] contains the columns for each partition, which will be useful
# for implementing when `lines=False`.
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
dtypes = cls.get_dtypes(dtypes_ids)
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
if isinstance(dtypes, pandas.Series):
dtypes.index = columns
else:
dtypes = pandas.Series(dtypes, index=columns)
new_frame = cls.frame_cls(
np.array(partition_ids),
new_index,
columns,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_frame._apply_index_objs(axis=0)
return cls.query_compiler_cls(new_frame)
|
def read(cls, path_or_buf, **kwargs):
path_or_buf = cls.get_path(path_or_buf)
if not kwargs.get("lines", False):
return cls.single_worker_read(path_or_buf, **kwargs)
columns = pandas.read_json(
BytesIO(b"" + open(path_or_buf, "rb").readline()), lines=True
).columns
kwargs["columns"] = columns
empty_pd_df = pandas.DataFrame(columns=columns)
with cls.file_open(path_or_buf, "rb", kwargs.get("compression", "infer")) as f:
total_bytes = cls.file_size(f)
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
partition_ids = []
index_ids = []
dtypes_ids = []
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(columns):
column_widths = [len(columns)]
num_splits = 1
else:
column_widths = [
column_chunksize
if i != num_splits - 1
else len(columns) - (column_chunksize * (num_splits - 1))
for i in range(num_splits)
]
while f.tell() < total_bytes:
start = f.tell()
args = {"fname": path_or_buf, "num_splits": num_splits, "start": start}
args.update(kwargs)
partition_id = cls.call_deploy(f, chunk_size, num_splits + 3, args)
partition_ids.append(partition_id[:-3])
index_ids.append(partition_id[-3])
dtypes_ids.append(partition_id[-2])
# partition_id[-1] contains the columns for each partition, which will be useful
# for implementing when `lines=False`.
row_lengths = cls.materialize(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
dtypes = cls.get_dtypes(dtypes_ids)
partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)
if isinstance(dtypes, pandas.Series):
dtypes.index = columns
else:
dtypes = pandas.Series(dtypes, index=columns)
new_frame = cls.frame_cls(
np.array(partition_ids),
new_index,
columns,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_frame._apply_index_objs(axis=0)
return cls.query_compiler_cls(new_frame)
|
https://github.com/modin-project/modin/issues/1379
|
Traceback (most recent call last):
File "code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 2, in <module>
File "/modin/pandas/io.py", line 143, in read_json
return DataFrame(query_compiler=BaseFactory.read_json(**kwargs))
File "/modin/data_management/factories.py", line 60, in read_json
return cls._determine_engine()._read_json(**kwargs)
File "/modin/data_management/factories.py", line 64, in _read_json
return cls.io_cls.read_json(**kwargs)
File "/modin/engines/base/io/text/json_reader.py", line 13, in read
return cls.single_worker_read(path_or_buf, **kwargs)
File "/modin/backends/pandas/parsers.py", line 57, in single_worker_read
pandas_frame = cls.parse(fname, **kwargs)
File "/modin/backends/pandas/parsers.py", line 117, in parse
return pandas.read_json(fname, **kwargs)
File "/pandas/util/_decorators.py", line 186, in wrapper
return func(*args, **kwargs)
File "/pandas/io/json/_json.py", line 608, in read_json
result = json_reader.read()
File "/pandas/io/json/_json.py", line 731, in read
obj = self._get_object_parser(self.data)
File "/pandas/io/json/_json.py", line 753, in _get_object_parser
obj = FrameParser(json, **kwargs).parse()
File "/pandas/io/json/_json.py", line 857, in parse
self._parse_no_numpy()
File "/pandas/io/json/_json.py", line 1089, in _parse_no_numpy
loads(json, precise_float=self.precise_float), dtype=None
ValueError: Expected object or value
|
ValueError
|
def read_gbq(
cls,
query: str,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
auth_local_webserver=False,
dialect=None,
location=None,
configuration=None,
credentials=None,
use_bqstorage_api=None,
private_key=None,
verbose=None,
progress_bar_type=None,
):
ErrorMessage.default_to_pandas("`read_gbq`")
return cls.from_pandas(
pandas.read_gbq(
query,
project_id=project_id,
index_col=index_col,
col_order=col_order,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
dialect=dialect,
location=location,
configuration=configuration,
credentials=credentials,
use_bqstorage_api=use_bqstorage_api,
private_key=private_key,
verbose=verbose,
progress_bar_type=progress_bar_type,
)
)
|
def read_gbq(
cls,
query,
project_id=None,
index_col=None,
col_order=None,
reauth=False,
auth_local_webserver=False,
dialect=None,
location=None,
configuration=None,
credentials=None,
private_key=None,
verbose=None,
):
ErrorMessage.default_to_pandas("`read_gbq`")
return cls.from_pandas(
pandas.read_gbq(
query,
project_id=project_id,
index_col=index_col,
col_order=col_order,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
dialect=dialect,
location=location,
configuration=configuration,
credentials=credentials,
private_key=private_key,
verbose=verbose,
)
)
|
https://github.com/modin-project/modin/issues/1398
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/app/modin-project-modin-05754d9/modin/pandas/io.py", line 186, in read_gbq
return DataFrame(query_compiler=BaseFactory.read_gbq(**kwargs))
File "/app/modin-project-modin-05754d9/modin/data_management/factories.py", line 81, in read_gbq
return cls._determine_engine()._read_gbq(**kwargs)
File "/app/modin-project-modin-05754d9/modin/data_management/factories.py", line 87, in _read_gbq
return cls.io_cls.read_gbq(**kwargs)
TypeError: read_gbq() got an unexpected keyword argument 'progress_bar_type'
|
TypeError
|
def _getitem_slice(self, key):
if key.start is None and key.stop is None:
return self.copy()
return self.iloc[key]
|
def _getitem_slice(self, key):
# If there is no step, we can fasttrack the codepath to use existing logic from
# head and tail, which is already pretty fast.
if (
key.step is None
and (isinstance(key.start, int) or key.start is None)
and (isinstance(key.stop, int) or key.stop is None)
):
if key.start is None and key.stop is None:
return self.copy()
def compute_offset(value):
return (
value - len(self) if value > 0 else value if value != 0 else len(self)
)
# Head is a negative number, Tail is a positive number
if key.start is None:
return self.head(compute_offset(key.stop))
elif key.stop is None:
return self.tail(compute_offset(-key.start))
return self.head(compute_offset(key.stop)).tail(compute_offset(-key.start))
# We convert to a RangeIndex because getitem_row_array is expecting a list
# of indices, and RangeIndex will give us the exact indices of each boolean
# requested.
if isinstance(key.start, int) and isinstance(key.stop, int):
key = pandas.RangeIndex(len(self.index))[key]
else:
# To handle this case correctly, we let pandas compute the indices by
# creating a MultiIndex ("a", "b") such that "a" is the original index and
# "b" is the Range of numeric indices for each. Then we apply the slice to
# the original index (named "a"), then extract the index values (named "b")
# and pass that along to be sliced in the actual data. This also serves as a
# validation that the input slice is usable and correct.
key = pandas.DataFrame(
index=pandas.MultiIndex.from_arrays(
[self.index, pandas.RangeIndex(len(self.index))], names=["a", "b"]
)
)[key].index.get_level_values("b")
return self.__constructor__(
query_compiler=self._query_compiler.getitem_row_array(key)
)
|
https://github.com/modin-project/modin/issues/1383
|
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-6-525f4cb2bfb2> in <module>
----> 1 c[3:7]
~/software_builds/modin/modin/pandas/base.py in __getitem__(self, key)
3254 )
3255 if indexer is not None:
-> 3256 return self._getitem_slice(indexer)
3257 else:
3258 return self._getitem(key)
~/software_builds/modin/modin/pandas/base.py in _getitem_slice(self, key)
3283 elif key.stop is None:
3284 return self.tail(compute_offset(-key.start))
-> 3285 return self.head(compute_offset(key.stop)).tail(compute_offset(-key.start))
3286 # We convert to a RangeIndex because getitem_row_array is expecting a list
3287 # of indices, and RangeIndex will give us the exact indices of each boolean
~/software_builds/modin/modin/pandas/series.py in tail(self, n)
1052 if n == 0:
1053 return Series(dtype=self.dtype)
-> 1054 return super(Series, self).tail(n)
1055
1056 def take(self, indices, axis=0, is_copy=None, **kwargs):
~/software_builds/modin/modin/pandas/base.py in tail(self, n)
2789 if n >= len(self.index):
2790 return self.copy()
-> 2791 return self.__constructor__(query_compiler=self._query_compiler.tail(n))
2792
2793 def to_clipboard(self, excel=True, sep=None, **kwargs): # pragma: no cover
~/software_builds/modin/modin/backends/pandas/query_compiler.py in tail(self, n)
808 QueryCompiler containing the last n rows of the original QueryCompiler.
809 """
--> 810 return self.__constructor__(self._modin_frame.tail(n))
811
812 def front(self, n):
~/software_builds/modin/modin/engines/base/frame/data.py in tail(self, n)
1355 new_row_lengths,
1356 self._column_widths,
-> 1357 self._dtypes,
1358 )
1359
~/software_builds/modin/modin/engines/base/frame/data.py in __init__(self, partitions, index, columns, row_lengths, column_widths, dtypes)
62 sum(row_lengths) != len(self._index_cache),
63 "Row lengths: {} != {}".format(
---> 64 sum(row_lengths), len(self._index_cache)
65 ),
66 )
~/software_builds/modin/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition, extra_log)
51 "Internal Error. "
52 "Please email bug_reports@modin.org with the traceback and command that"
---> 53 " caused this error.\n{}".format(extra_log)
54 )
55
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
Row lengths: 0 != 2
|
Exception
|
def __setitem__(self, key, value):
if not isinstance(key, str):
def setitem_without_string_columns(df):
# Arrow makes memory-mapped objects immutable, so copy will allow them
# to be mutable again.
df = df.copy(True)
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if isinstance(value, (pandas.DataFrame, DataFrame)):
if value.shape[1] != 1 and key not in self.columns:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
value = value[value.columns[0]].values
elif isinstance(value, np.ndarray):
if len(value.shape) > 1 and value.shape[1] != 1 and key not in self.columns:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
assert len(value.shape) < 3, (
"Shape of new values must be compatible with manager shape"
)
value = value.T.reshape(-1)
if len(self) > 0:
value = value[: len(self)]
if not isinstance(value, Series):
value = list(value)
if key not in self.columns:
if isinstance(value, Series):
if len(self.columns) == 0:
self._query_compiler = value._query_compiler.copy()
else:
self._create_or_update_from_compiler(
self._query_compiler.concat(1, value._query_compiler),
inplace=True,
)
# Now that the data is appended, we need to update the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# last column name from the list (the appended value's name and append
# the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
else:
self.insert(loc=len(self.columns), column=key, value=value)
elif len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
if isinstance(value, Series):
value = value._query_compiler
self._update_inplace(self._query_compiler.setitem(0, key, value))
|
def __setitem__(self, key, value):
if not isinstance(key, str):
def setitem_without_string_columns(df):
# Arrow makes memory-mapped objects immutable, so copy will allow them
# to be mutable again.
df = df.copy(True)
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if is_list_like(value):
if isinstance(value, (pandas.DataFrame, DataFrame)):
if value.shape[1] != 1 and key not in self.columns:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
value = value[value.columns[0]].values
elif isinstance(value, np.ndarray):
if len(value.shape) > 1 and value.shape[1] != 1 and key not in self.columns:
raise ValueError(
"Wrong number of items passed %i, placement implies 1"
% value.shape[1]
)
assert len(value.shape) < 3, (
"Shape of new values must be compatible with manager shape"
)
value = value.T.reshape(-1)
if len(self) > 0:
value = value[: len(self)]
if not isinstance(value, Series):
value = list(value)
if key not in self.columns:
if isinstance(value, Series):
self._create_or_update_from_compiler(
self._query_compiler.concat(1, value._query_compiler), inplace=True
)
# Now that the data is appended, we need to update the column name for
# that column to `key`, otherwise the name could be incorrect. Drop the
# last column name from the list (the appended value's name and append
# the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
else:
self.insert(loc=len(self.columns), column=key, value=value)
elif len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
if isinstance(value, Series):
value = value._query_compiler
self._update_inplace(self._query_compiler.setitem(0, key, value))
|
https://github.com/modin-project/modin/issues/1388
|
Traceback (most recent call last):
File "empty_df_test2.py", line 8, in <module>
df['id'] = pd.Series([1, 2])
File "/localdisk/gashiman/modin/modin/pandas/dataframe.py", line 2069, in __setitem__
self._query_compiler.concat(1, value._query_compiler), inplace=True
File "/localdisk/gashiman/modin/modin/backends/pandas/query_compiler.py", line 155, in concat
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
File "/localdisk/gashiman/modin/modin/engines/base/frame/data.py", line 1159, in _concat
new_partitions = self._frame_mgr_cls.concat(axis, left_parts, right_parts)
File "/localdisk/gashiman/modin/modin/engines/base/frame/partition_manager.py", line 303, in concat
return np.concatenate([left_parts] + right_parts, axis=axis)
File "<__array_function__ internals>", line 6, in concatenate
numpy.AxisError: axis 1 is out of bounds for array of dimension 1
|
numpy.AxisError
|
def groupby_reduce(
self,
by,
axis,
groupby_args,
map_func,
map_args,
reduce_func=None,
reduce_args=None,
numeric_only=True,
drop=False,
):
"""Apply a Groupby via MapReduce pattern.
Note: Result length will be the number of unique values in `by`.
Currently, here is how this is implemented:
- map phase:
During the map phase we set `as_index` to True to force the `by` into the
index for the next phase. We always do this so that the reduce phase has
complete access of the `by` data without having to shuffle it twice. The map
function is applied with the arguments provided. The `index` of the
partitions will become the new `by` column. Sometimes, the name of `by` is
the same as a data column. In these cases we add "_modin_groupby_" to the
name of the index. This does not happen when grouping by multiple columns
because those columns have already been dropped as a requirement.
- reduce phase:
During the reduce phase, the `by` data is moved from the `index` into the
data. The names of those inserted become the `by` for the reduce phase of
the groupby. Once applied, we drop the columns in all partitions after the
first so we do not insert the data multiple times. We also avoid inserting
data when the data from the `by` parameter did not come from this object.
The columns can be derived externally but the new index must be computed
post hoc.
Args:
by: The query compiler object to groupby.
axis: The axis to groupby. Must be 0 currently.
groupby_args: The arguments for the groupby component.
map_func: The function to perform during the map phase.
map_args: The arguments for the `map_func`.
reduce_func: The function to perform during the reduce phase.
reduce_args: The arguments for `reduce_func`.
numeric_only: Whether to drop non-numeric columns.
drop: Whether the data in `by` was dropped.
Returns:
A new Query Compiler
"""
assert isinstance(by, type(self)), (
"Can only use groupby reduce with another Query Compiler"
)
assert axis == 0, "Can only groupby reduce with axis=0"
if numeric_only:
qc = self.getitem_column_array(self._modin_frame._numeric_columns(True))
else:
qc = self
first_column = qc.columns[0]
as_index = groupby_args.get("as_index", True)
# When drop is False and as_index is False, we do not want to insert the `by`
# data as a new column in the dataframe. We will drop it.
drop_by = not drop
# For simplicity we allow only one function to be passed in if both are the
# same.
if reduce_func is None:
reduce_func = map_func
reduce_args = map_args
def _map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
def _reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
# Avoid inserting data after the first partition or if the data did not come
# from this query compiler.
if not as_index and (first_column not in df.columns or drop_by):
return result.drop(columns=by_part)
return result
if axis == 0:
if not as_index and drop:
new_columns = by.columns.append(qc.columns)
else:
new_columns = qc.columns
new_index = None
else:
new_index = self.index
new_columns = None
new_modin_frame = qc._modin_frame.groupby_reduce(
axis,
by._modin_frame,
_map,
_reduce,
new_columns=new_columns,
new_index=new_index,
)
return self.__constructor__(new_modin_frame)
|
def groupby_reduce(
self,
by,
axis,
groupby_args,
map_func,
map_args,
reduce_func=None,
reduce_args=None,
numeric_only=True,
drop=False,
):
assert isinstance(by, type(self)), (
"Can only use groupby reduce with another Query Compiler"
)
other_len = len(by.columns)
as_index = groupby_args.get("as_index", True)
def _map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat(
[df] + [other[[o for o in other if o not in df]]], axis=1
)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result.reset_index(drop=False)
if reduce_func is not None:
def _reduce(df):
# See note above about setting `as_index`
groupby_args["as_index"] = True
if other_len > 1:
by = list(df.columns[0:other_len])
else:
by = df.columns[0]
result = reduce_func(
df.groupby(by=by, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
return result
else:
def _reduce(df):
# See note above about setting `as_index`
groupby_args["as_index"] = True
if other_len > 1:
by = list(df.columns[0:other_len])
else:
by = df.columns[0]
result = map_func(df.groupby(by=by, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
return result
if axis == 0:
new_columns = (
self.columns
if not numeric_only
else self._modin_frame._numeric_columns(True)
)
new_index = None
compute_qc = self.getitem_column_array(new_columns) if numeric_only else self
else:
new_index = self.index
new_columns = None
compute_qc = self
new_modin_frame = compute_qc._modin_frame.groupby_reduce(
axis,
by._modin_frame,
_map,
_reduce,
new_columns=new_columns,
new_index=new_index,
)
result = self.__constructor__(new_modin_frame)
# Reset `as_index` because it was edited inplace.
groupby_args["as_index"] = as_index
if as_index:
return result
else:
if result.index.name is None or result.index.name in result.columns:
drop = False
return result.reset_index(drop=not drop)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def _map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat([df] + [other[[o for o in other if o not in df]]], axis=1)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result
|
def _map(df, other):
# Set `as_index` to True to track the metadata of the grouping object
# It is used to make sure that between phases we are constructing the
# right index and placing columns in the correct order.
groupby_args["as_index"] = True
other = other.squeeze(axis=axis ^ 1)
if isinstance(other, pandas.DataFrame):
df = pandas.concat([df] + [other[[o for o in other if o not in df]]], axis=1)
other = list(other.columns)
result = map_func(df.groupby(by=other, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and result.index.name in result.columns
):
result.index.name = "{}{}".format("_modin_groupby_", result.index.name)
return result.reset_index(drop=False)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def _reduce(df):
other_len = len(df.index.names)
df = df.reset_index(drop=False)
# See note above about setting `as_index`
groupby_args["as_index"] = as_index
if other_len > 1:
by_part = list(df.columns[0:other_len])
else:
by_part = df.columns[0]
result = reduce_func(
df.groupby(by=by_part, axis=axis, **groupby_args), **reduce_args
)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
# Avoid inserting data after the first partition or if the data did not come
# from this query compiler.
if not as_index and (first_column not in df.columns or drop_by):
return result.drop(columns=by_part)
return result
|
def _reduce(df):
# See note above about setting `as_index`
groupby_args["as_index"] = True
if other_len > 1:
by = list(df.columns[0:other_len])
else:
by = df.columns[0]
result = map_func(df.groupby(by=by, axis=axis, **groupby_args), **map_args)
if (
not isinstance(result.index, pandas.MultiIndex)
and result.index.name is not None
and "_modin_groupby_" in result.index.name
):
result.index.name = result.index.name[len("_modin_groupby_") :]
return result
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def groupby_reduce(
self, axis, by, map_func, reduce_func, new_index=None, new_columns=None
):
"""Groupby another dataframe and aggregate the result.
Args:
axis: The axis to groupby and aggregate over.
by: The dataframe to group by.
map_func: The map component of the aggregation.
reduce_func: The reduce component of the aggregation.
new_index: (optional) The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns: (optional) The columns of the result. We may know this in
advance, and if not provided it must be computed.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.groupby_reduce(
axis, self._partitions, by._partitions, map_func, reduce_func
)
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
return self.__constructor__(new_partitions, new_index, new_columns)
|
def groupby_reduce(
self, axis, by, map_func, reduce_func, new_index=None, new_columns=None
):
"""Groupby another dataframe and aggregate the result.
Args:
axis: The axis to groupby and aggregate over.
by: The dataframe to group by.
map_func: The map component of the aggregation.
reduce_func: The reduce component of the aggregation.
new_index: (optional) The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns: (optional) The columns of the result. We may know this in
advance, and if not provided it must be computed.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.groupby_reduce(
axis, self._partitions, by._partitions, map_func, reduce_func
)
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
if axis == 0:
new_widths = self._column_widths
new_lengths = None
else:
new_widths = None
new_lengths = self._row_lengths
return self.__constructor__(
new_partitions, new_index, new_columns, new_lengths, new_widths
)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
drop = False
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
isinstance(self.axes[axis], pandas.MultiIndex)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if not isinstance(by, Series) and axis == 0 and all(o in self for o in by):
warnings.warn(
"Multi-column groupby is a new feature. "
"Please report any bugs/issues to bug_reports@modin.org."
)
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
)
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
drop = False
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
isinstance(self.axes[axis], pandas.MultiIndex)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if not isinstance(by, Series) and axis == 0 and all(o in self for o in by):
warnings.warn(
"Multi-column groupby is a new feature. "
"Please report any bugs/issues to bug_reports@modin.org."
)
by = self.__getitem__(by)._query_compiler
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def __init__(
self,
df,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
drop,
**kwargs,
):
self._axis = axis
self._idx_name = idx_name
self._df = df
self._query_compiler = self._df._query_compiler
self._index = self._query_compiler.index
self._columns = self._query_compiler.columns
self._by = by
self._drop = drop
if level is None and is_list_like(by) or isinstance(by, type(self._query_compiler)):
# This tells us whether or not there are multiple columns/rows in the groupby
self._is_multi_by = (
isinstance(by, type(self._query_compiler)) and len(by.columns) > 1
) or (
not isinstance(by, type(self._query_compiler))
and all(obj in self._df for obj in self._by)
and axis == 0
)
else:
self._is_multi_by = False
self._level = level
self._kwargs = {
"level": level,
"sort": sort,
"as_index": as_index,
"group_keys": group_keys,
"squeeze": squeeze,
}
self._kwargs.update(kwargs)
|
def __init__(
self,
df,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
drop,
**kwargs,
):
self._axis = axis
self._idx_name = idx_name
self._df = df
self._query_compiler = self._df._query_compiler
self._index = self._query_compiler.index
self._columns = self._query_compiler.columns
self._by = by
self._drop = drop
if (
level is None
and not isinstance(by, type(self._query_compiler))
and is_list_like(by)
):
# This tells us whether or not there are multiple columns/rows in the groupby
self._is_multi_by = all(obj in self._df for obj in self._by) and axis == 0
else:
self._is_multi_by = False
self._level = level
self._kwargs = {
"level": level,
"sort": sort,
"as_index": as_index,
"group_keys": group_keys,
"squeeze": squeeze,
}
self._kwargs.update(kwargs)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def _groupby_reduce(
self, map_func, reduce_func, drop=True, numeric_only=True, **kwargs
):
if self._is_multi_by and not isinstance(self._by, type(self._query_compiler)):
return self._default_to_pandas(map_func, **kwargs)
if not isinstance(self._by, type(self._query_compiler)):
return self._apply_agg_function(map_func, drop=drop, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
from .dataframe import DataFrame
return DataFrame(
query_compiler=groupby_qc.groupby_reduce(
self._by,
self._axis,
self._kwargs,
map_func,
kwargs,
reduce_func=reduce_func,
reduce_args=kwargs,
numeric_only=numeric_only,
drop=self._drop,
)
)
|
def _groupby_reduce(
self, map_func, reduce_func, drop=True, numeric_only=True, **kwargs
):
if self._is_multi_by:
return self._default_to_pandas(map_func, **kwargs)
if not isinstance(self._by, type(self._query_compiler)):
return self._apply_agg_function(map_func, drop=drop, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
from .dataframe import DataFrame
return DataFrame(
query_compiler=groupby_qc.groupby_reduce(
self._by,
self._axis,
self._kwargs,
map_func,
kwargs,
reduce_func=reduce_func,
reduce_args=kwargs,
numeric_only=numeric_only,
drop=self._drop,
)
)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def _apply_agg_function(self, f, drop=True, **kwargs):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "'{0}' object is not callable".format(type(f))
from .dataframe import DataFrame
if self._is_multi_by:
return self._default_to_pandas(f, **kwargs)
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
new_manager = groupby_qc.groupby_agg(
by, self._axis, f, self._kwargs, kwargs, drop=self._drop
)
if self._idx_name is not None and self._as_index:
new_manager.index.name = self._idx_name
return DataFrame(query_compiler=new_manager)
|
def _apply_agg_function(self, f, drop=True, **kwargs):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "'{0}' object is not callable".format(type(f))
from .dataframe import DataFrame
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
if self._is_multi_by:
return self._default_to_pandas(f, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop and self._drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
new_manager = groupby_qc.groupby_agg(
by, self._axis, f, self._kwargs, kwargs, drop=self._drop
)
if self._idx_name is not None and self._as_index:
new_manager.index.name = self._idx_name
return DataFrame(query_compiler=new_manager)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def _default_to_pandas(self, f, **kwargs):
"""Defailts the execution of this function to pandas.
Args:
f: The function to apply to each group.
Returns:
A new Modin DataFrame with the result of the pandas function.
"""
if isinstance(self._by, type(self._query_compiler)) and len(self._by.columns) == 1:
by = self._by.to_pandas().squeeze()
elif isinstance(self._by, type(self._query_compiler)):
by = list(self._by.columns)
else:
by = self._by
def groupby_on_multiple_columns(df):
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
return self._df._default_to_pandas(groupby_on_multiple_columns)
|
def _default_to_pandas(self, f, **kwargs):
"""Defailts the execution of this function to pandas.
Args:
f: The function to apply to each group.
Returns:
A new Modin DataFrame with the result of the pandas function.
"""
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
def groupby_on_multiple_columns(df):
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
return self._df._default_to_pandas(groupby_on_multiple_columns)
|
https://github.com/modin-project/modin/issues/1073
|
Traceback (most recent call last):
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 190, in <module>
query(query_df)
File "/localdisk/gashiman/omniscripts/taxi/taxibench_pandas.py", line 95, in q3
return transformed.groupby(['passenger_count','pickup_datetime'])[['passenger_count','pickup_datetime']].count()['passenger_count']
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 249, in __getitem__
return SeriesGroupBy(self._default_to_pandas(lambda df: df.__getitem__(key)))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 535, in _default_to_pandas
return self._df._default_to_pandas(groupby_on_multiple_columns)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 227, in _default_to_pandas
result = op(pandas_obj, *args, **kwargs)
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/groupby.py", line 533, in groupby_on_multiple_columns
return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/generic.py", line 7894, in groupby
**kwargs
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 2522, in groupby
return klass(obj, by, **kwds)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/groupby.py", line 391, in __init__
mutated=self.mutated,
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 652, in _get_grouper
if not isinstance(gpr, Grouping)
File "/nfs/site/home/gashiman/.local/lib/python3.7/site-packages/pandas/core/groupby/grouper.py", line 347, in __init__
raise ValueError("Grouper for '{}' not 1-dimensional".format(t))
ValueError: Grouper for '<class 'pandas.core.frame.DataFrame'>' not 1-dimensional
|
ValueError
|
def drop_duplicates(self, keep="first", inplace=False, **kwargs):
"""Return DataFrame with duplicate rows removed, optionally only considering certain columns
Args:
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns:
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
subset = kwargs.get("subset", None)
if subset is not None:
if is_list_like(subset):
if not isinstance(subset, list):
subset = list(subset)
else:
subset = [subset]
duplicates = self.duplicated(keep=keep, subset=subset)
else:
duplicates = self.duplicated(keep=keep)
indices = duplicates.values.nonzero()[0]
return self.drop(index=self.index[indices], inplace=inplace)
|
def drop_duplicates(self, keep="first", inplace=False, **kwargs):
"""Return DataFrame with duplicate rows removed, optionally only considering certain columns
Args:
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns:
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if kwargs.get("subset", None) is not None:
duplicates = self.duplicated(keep=keep, subset=kwargs.get("subset"))
else:
duplicates = self.duplicated(keep=keep)
indices = duplicates.values.nonzero()[0]
return self.drop(index=self.index[indices], inplace=inplace)
|
https://github.com/modin-project/modin/issues/1115
|
2020-02-27 10:22:13,644 INFO resource_spec.py:216 -- Starting Ray with 200.0 GiB memory available for workers and up to 200.0 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-02-27 10:22:13,899 WARNING services.py:1365 -- WARNING: object_store_memory is not verified when plasma_directory is set.
UserWarning: Distributing <class 'list'> object. This may take some time.
name max_speed health
1 one 1 10
2 two 4 20
3 three 7 30
Traceback (most recent call last):
File "drop_duplicates_test.py", line 13, in <module>
df1 = df.drop_duplicates("name")
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/dataframe.py", line 208, in drop_duplicates
subset=subset, keep=keep, inplace=inplace
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 1120, in drop_duplicates
duplicates = self.duplicated(keep=keep, subset=kwargs.get("subset"))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/dataframe.py", line 242, in duplicated
if len(df.columns) > 1:
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/series.py", line 227, in __getattr__
raise e
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/series.py", line 223, in __getattr__
return object.__getattribute__(self, key)
AttributeError: 'Series' object has no attribute 'columns'
|
AttributeError
|
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `self.__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, self.__name__)(**init_kwargs).apply(
func,
axis=axis,
raw=raw,
result_type=result_type,
)
).__name__
except Exception:
return_type = self.__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
if axis == 0 and result.name == self.index[0]:
result.name = None
elif axis == 1 and result.name == self.columns[0]:
result.name = None
return result
|
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `self.__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, self.__name__)(**init_kwargs).apply(
func,
axis=axis,
raw=raw,
result_type=result_type,
)
).__name__
except Exception:
return_type = self.__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if hasattr(result, "name"):
if axis == 0 and result.name == self.index[0]:
result.name = None
elif axis == 1 and result.name == self.columns[0]:
result.name = None
return result
|
https://github.com/modin-project/modin/issues/1115
|
2020-02-27 10:22:13,644 INFO resource_spec.py:216 -- Starting Ray with 200.0 GiB memory available for workers and up to 200.0 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-02-27 10:22:13,899 WARNING services.py:1365 -- WARNING: object_store_memory is not verified when plasma_directory is set.
UserWarning: Distributing <class 'list'> object. This may take some time.
name max_speed health
1 one 1 10
2 two 4 20
3 three 7 30
Traceback (most recent call last):
File "drop_duplicates_test.py", line 13, in <module>
df1 = df.drop_duplicates("name")
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/dataframe.py", line 208, in drop_duplicates
subset=subset, keep=keep, inplace=inplace
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/base.py", line 1120, in drop_duplicates
duplicates = self.duplicated(keep=keep, subset=kwargs.get("subset"))
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/dataframe.py", line 242, in duplicated
if len(df.columns) > 1:
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/series.py", line 227, in __getattr__
raise e
File "/nfs/site/proj/scripting_tools/gashiman/modin/modin/pandas/series.py", line 223, in __getattr__
return object.__getattribute__(self, key)
AttributeError: 'Series' object has no attribute 'columns'
|
AttributeError
|
def _validate_other(
self,
other,
axis,
numeric_only=False,
numeric_or_time_only=False,
numeric_or_object_only=False,
comparison_dtypes_only=False,
):
"""Helper method to check validity of other in inter-df operations"""
# We skip dtype checking if the other is a scalar.
if is_scalar(other):
return other
axis = self._get_axis_number(axis) if axis is not None else 1
result = other
if isinstance(other, BasePandasDataset):
return other._query_compiler
elif is_list_like(other):
if isinstance(other, pandas.Series):
other = other.reindex(self.axes[axis])
if axis == 0:
if len(other) != len(self._query_compiler.index):
raise ValueError(
"Unable to coerce to Series, length must be {0}: given {1}".format(
len(self._query_compiler.index), len(other)
)
)
else:
if len(other) != len(self._query_compiler.columns):
raise ValueError(
"Unable to coerce to Series, length must be {0}: given {1}".format(
len(self._query_compiler.columns), len(other)
)
)
if hasattr(other, "dtype"):
other_dtypes = [other.dtype] * len(other)
else:
other_dtypes = [type(x) for x in other]
else:
other_dtypes = [
type(other)
for _ in range(
len(self._query_compiler.index)
if axis
else len(self._query_compiler.columns)
)
]
# Do dtype checking.
if numeric_only:
if not all(
is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation on non-numeric dtypes")
elif numeric_or_object_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype))
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation non-numeric dtypes")
elif comparison_dtypes_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
or is_dtype_equal(self_dtype, other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
elif numeric_or_time_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
return result
|
def _validate_other(
self,
other,
axis,
numeric_only=False,
numeric_or_time_only=False,
numeric_or_object_only=False,
comparison_dtypes_only=False,
):
"""Helper method to check validity of other in inter-df operations"""
# We skip dtype checking if the other is a scalar.
if is_scalar(other):
return other
axis = self._get_axis_number(axis) if axis is not None else 1
result = other
if isinstance(other, BasePandasDataset):
return other._query_compiler
elif is_list_like(other):
if axis == 0:
if len(other) != len(self._query_compiler.index):
raise ValueError(
"Unable to coerce to Series, length must be {0}: given {1}".format(
len(self._query_compiler.index), len(other)
)
)
else:
if len(other) != len(self._query_compiler.columns):
raise ValueError(
"Unable to coerce to Series, length must be {0}: given {1}".format(
len(self._query_compiler.columns), len(other)
)
)
if hasattr(other, "dtype"):
other_dtypes = [other.dtype] * len(other)
else:
other_dtypes = [type(x) for x in other]
else:
other_dtypes = [
type(other)
for _ in range(
len(self._query_compiler.index)
if axis
else len(self._query_compiler.columns)
)
]
# Do dtype checking.
if numeric_only:
if not all(
is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation on non-numeric dtypes")
elif numeric_or_object_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype))
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError("Cannot do operation non-numeric dtypes")
elif comparison_dtypes_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
or is_dtype_equal(self_dtype, other_dtype)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
elif numeric_or_time_only:
if not all(
(is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype))
or (
is_datetime_or_timedelta_dtype(self_dtype)
and is_datetime_or_timedelta_dtype(other_dtype)
)
for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes)
):
raise TypeError(
"Cannot do operation non-numeric objects with numeric objects"
)
return result
|
https://github.com/modin-project/modin/issues/1059
|
Traceback (most recent call last):
File "bug.py", line 13, in <module>
print(mdf + ms)
File "/Users/wonchanl/Workspace/modinv/modin/pandas/dataframe.py", line 2084, in __add__
return self.add(other, axis=axis, level=level, fill_value=fill_value)
File "/Users/wonchanl/Workspace/modinv/modin/pandas/dataframe.py", line 480, in add
other, axis=axis, level=level, fill_value=fill_value
File "/Users/wonchanl/Workspace/modinv/modin/pandas/base.py", line 335, in add
"add", other, axis=axis, level=level, fill_value=fill_value
File "/Users/wonchanl/Workspace/modinv/modin/pandas/base.py", line 206, in _binary_op
other = self._validate_other(other, axis, numeric_or_object_only=True)
File "/Users/wonchanl/Workspace/modinv/modin/pandas/base.py", line 136, in _validate_other
len(self._query_compiler.columns), len(other)
ValueError: Unable to coerce to Series, length must be 2: given 1
|
ValueError
|
def __getitem__(self, key):
# When getting along a single axis,
if not isinstance(key, tuple):
# Try to fasttrack the code through already optimized path
try:
return self.df.__getitem__(key)
# This can happen if it is a list of rows
except KeyError:
pass
else:
if len(key) > self.df.ndim:
raise IndexingError("Too many indexers")
# If we're only slicing columns, handle the case with `__getitem__`
if isinstance(key[0], slice) and key[0] == slice(None):
if not isinstance(key[1], slice):
# Boolean indexers can just be sliced into the columns object and
# then passed to `__getitem__`
if is_boolean_array(key[1]):
return self.df.__getitem__(self.df.columns[key[1]])
return self.df.__getitem__(key[1])
else:
result_slice = self.df.columns.slice_locs(key[1].start, key[1].stop)
return self.df.iloc[:, slice(*result_slice)]
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
# Check that the row_lookup/col_lookup is longer than 1 or that the
# row_loc/col_loc is not boolean list to determine the ndim of the
# result properly for multiindex.
ndim = (0 if len(row_lookup) == 1 and not is_boolean_array(row_loc) else 1) + (
0 if len(col_lookup) == 1 and not is_boolean_array(col_loc) else 1
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
def __getitem__(self, key):
# When getting along a single axis,
if not isinstance(key, tuple):
# Try to fasttrack the code through already optimized path
try:
return self.df.__getitem__(key)
# This can happen if it is a list of rows
except KeyError:
pass
else:
if len(key) > self.df.ndim:
raise IndexingError("Too many indexers")
if isinstance(key[0], slice) and key[0] == slice(None):
if not isinstance(key[1], slice):
return self.df.__getitem__(key[1])
else:
result_slice = self.df.columns.slice_locs(key[1].start, key[1].stop)
return self.df.iloc[:, slice(*result_slice)]
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
# Check that the row_lookup/col_lookup is longer than 1 or that the
# row_loc/col_loc is not boolean list to determine the ndim of the
# result properly for multiindex.
ndim = (0 if len(row_lookup) == 1 and not is_boolean_array(row_loc) else 1) + (
0 if len(col_lookup) == 1 and not is_boolean_array(col_loc) else 1
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
https://github.com/modin-project/modin/issues/1032
|
Pandas:
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
UserWarning: Distributing <class 'list'> object. This may take some time.
Traceback (most recent call last):
File "modint.py", line 17, in <module>
o = mdf.loc[:, ~mdf.columns.duplicated()]
File "/opt/anaconda/lib/python3.7/site-packages/modin/pandas/indexing.py", line 216, in __getitem__
return self.df.__getitem__(key[1])
File "/opt/anaconda/lib/python3.7/site-packages/modin/pandas/base.py", line 3304, in __getitem__
return self._getitem(key)
File "/opt/anaconda/lib/python3.7/site-packages/modin/pandas/dataframe.py", line 1896, in _getitem
return self._getitem_array(key)
File "/opt/anaconda/lib/python3.7/site-packages/modin/pandas/dataframe.py", line 1926, in _getitem_array
len(key), len(self.index)
ValueError: Item wrong length 2 instead of 3.
|
ValueError
|
def __getitem__(self, key):
# When getting along a single axis,
if not isinstance(key, tuple):
# Try to fasttrack the code through already optimized path
try:
return self.df.__getitem__(key)
# This can happen if it is a list of rows
except KeyError:
pass
else:
if len(key) > self.df.ndim:
raise IndexingError("Too many indexers")
if isinstance(key[0], slice) and key[0] == slice(None):
if not isinstance(key[1], slice):
return self.df.__getitem__(key[1])
else:
result_slice = self.df.columns.slice_locs(key[1].start, key[1].stop)
return self.df.iloc[:, slice(*result_slice)]
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
# Check that the row_lookup/col_lookup is longer than 1 or that the
# row_loc/col_loc is not boolean list to determine the ndim of the
# result properly for multiindex.
ndim = (0 if len(row_lookup) == 1 and not is_boolean_array(row_loc) else 1) + (
0 if len(col_lookup) == 1 and not is_boolean_array(col_loc) else 1
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
def __getitem__(self, key):
# When getting along a single axis,
if not isinstance(key, tuple):
# Try to fasttrack the code through already optimized path
try:
return self.df.__getitem__(key)
# This can happen if it is a list of rows
except KeyError:
pass
else:
if len(key) > self.df.ndim:
raise IndexingError("Too many indexers")
if isinstance(key[0], slice) and key[0] == slice(None):
return self.df.__getitem__(key[1])
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
# Check that the row_lookup/col_lookup is longer than 1 or that the
# row_loc/col_loc is not boolean list to determine the ndim of the
# result properly for multiindex.
ndim = (0 if len(row_lookup) == 1 and not is_boolean_array(row_loc) else 1) + (
0 if len(col_lookup) == 1 and not is_boolean_array(col_loc) else 1
)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
https://github.com/modin-project/modin/issues/1023
|
In [4]: df.loc[:, "col0":"col3"]
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-f1d9690a44cf> in <module>
----> 1 df.loc[:, "col0":"col3"]
~/software_builds/modin/modin/pandas/indexing.py in __getitem__(self, key)
213 raise IndexingError("Too many indexers")
214 if isinstance(key[0], slice) and key[0] == slice(None):
--> 215 return self.df.__getitem__(key[1])
216 row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
217 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
~/software_builds/modin/modin/pandas/base.py in __getitem__(self, key)
3284 # This lets us reuse code in Pandas to error check
3285 indexer = convert_to_index_sliceable(
-> 3286 getattr(pandas, self.__name__)(index=self.index), key
3287 )
3288 if indexer is not None:
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/indexing.py in convert_to_index_sliceable(obj, key)
2356 idx = obj.index
2357 if isinstance(key, slice):
-> 2358 return idx._convert_slice_indexer(key, kind="getitem")
2359
2360 elif isinstance(key, str):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/indexes/base.py in _convert_slice_indexer(self, key, kind)
3188 if self.is_integer() or is_index_slice:
3189 return slice(
-> 3190 self._validate_indexer("slice", key.start, kind),
3191 self._validate_indexer("slice", key.stop, kind),
3192 self._validate_indexer("slice", key.step, kind),
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/indexes/base.py in _validate_indexer(self, form, key, kind)
5069 pass
5070 elif kind in ["iloc", "getitem"]:
-> 5071 self._invalid_indexer(form, key)
5072 return key
5073
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/indexes/base.py in _invalid_indexer(self, form, key)
3338 "cannot do {form} indexing on {klass} with these "
3339 "indexers [{key}] of {kind}".format(
-> 3340 form=form, klass=type(self), key=key, kind=type(key)
3341 )
3342 )
TypeError: cannot do slice indexing on <class 'pandas.core.indexes.range.RangeIndex'> with these indexers [col0] of <class 'str'>
|
TypeError
|
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
result_type=None,
convert_dtype=True,
args=(),
**kwds,
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = self._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, str):
if axis == 1:
kwds["axis"] = axis
result = self._string_function(func, *args, **kwds)
# Sometimes we can return a scalar here
if isinstance(result, BasePandasDataset):
return result._query_compiler
return result
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", 'occurred at index {0}'".format(
self.index[0]
)
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif not callable(func) and not is_list_like(func):
raise TypeError("{} object is not callable".format(type(func)))
query_compiler = self._query_compiler.apply(func, axis, args=args, **kwds)
return query_compiler
|
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
result_type=None,
convert_dtype=True,
args=(),
**kwds,
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = self._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, str):
if axis == 1:
kwds["axis"] = axis
result = self._string_function(func, *args, **kwds)
# Sometimes we can return a scalar here
if isinstance(result, BasePandasDataset):
return result._query_compiler
return result
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", 'occurred at index {0}'".format(
self.index[0]
)
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif not callable(func) and not is_list_like(func):
raise TypeError("{} object is not callable".format(type(func)))
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
return query_compiler
|
https://github.com/modin-project/modin/issues/915
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-69-936ab004d2a8> in <module>
2 df = pd.read_csv(file_path)
3 data_per_user = generate_data_per_user_dicts()
----> 4 df.apply(extract_data_per_user, axis=1, args=(data_per_user,))
5
6 file_name = file_path.split("/")[-1].strip(".csv")
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\pandas\dataframe.py in apply(self, func, axis, broadcast, raw, reduce, result_type, convert_dtype, args, **kwds)
279 convert_dtype=convert_dtype,
280 args=args,
--> 281 **kwds
282 )
283 if not isinstance(query_compiler, type(self._query_compiler)):
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\pandas\base.py in apply(self, func, axis, broadcast, raw, reduce, result_type, convert_dtype, args, **kwds)
561 elif not callable(func) and not is_list_like(func):
562 raise TypeError("{} object is not callable".format(type(func)))
--> 563 query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
564 return query_compiler
565
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\backends\pandas\query_compiler.py in apply(self, func, axis, *args, **kwargs)
1034 """
1035 if callable(func):
-> 1036 return self._callable_func(func, axis, *args, **kwargs)
1037 elif isinstance(func, dict):
1038 return self._dict_func(func, axis, *args, **kwargs)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\backends\pandas\query_compiler.py in _callable_func(self, func, axis, *args, **kwargs)
1111 else:
1112 new_modin_frame = self._modin_frame._apply_full_axis(
-> 1113 axis, lambda df: df.apply(func, axis=axis, *args, **kwargs)
1114 )
1115 return self.__constructor__(new_modin_frame)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\engines\base\frame\data.py in _apply_full_axis(self, axis, func, new_index, new_columns, dtypes)
847 if new_columns is None:
848 new_columns = self._frame_mgr_cls.get_indices(
--> 849 1, new_partitions, lambda df: df.columns
850 )
851 if new_index is None:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\engines\dask\pandas_on_dask_futures\frame\partition_manager.py in get_indices(cls, axis, partitions, index_func)
53 else []
54 )
---> 55 new_idx = client.gather(new_idx)
56 return new_idx[0].append(new_idx[1:]) if len(new_idx) else new_idx
~\AppData\Local\Continuum\anaconda3\lib\site-packages\distributed\client.py in gather(self, futures, errors, direct, asynchronous)
1871 direct=direct,
1872 local_worker=local_worker,
-> 1873 asynchronous=asynchronous,
1874 )
1875
~\AppData\Local\Continuum\anaconda3\lib\site-packages\distributed\client.py in sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
766 else:
767 return sync(
--> 768 self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
769 )
770
~\AppData\Local\Continuum\anaconda3\lib\site-packages\distributed\utils.py in sync(loop, func, callback_timeout, *args, **kwargs)
332 if error[0]:
333 typ, exc, tb = error[0]
--> 334 raise exc.with_traceback(tb)
335 else:
336 return result[0]
~\AppData\Local\Continuum\anaconda3\lib\site-packages\distributed\utils.py in f()
316 if callback_timeout is not None:
317 future = gen.with_timeout(timedelta(seconds=callback_timeout), future)
--> 318 result[0] = yield future
319 except Exception as exc:
320 error[0] = sys.exc_info()
~\AppData\Local\Continuum\anaconda3\lib\site-packages\tornado\gen.py in run(self)
727
728 try:
--> 729 value = future.result()
730 except Exception:
731 exc_info = sys.exc_info()
~\AppData\Local\Continuum\anaconda3\lib\site-packages\distributed\client.py in _gather(self, futures, errors, direct, local_worker)
1727 exc = CancelledError(key)
1728 else:
-> 1729 raise exception.with_traceback(traceback)
1730 raise exc
1731 if errors == "skip":
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\engines\base\frame\axis_partition.py in deploy_axis_func()
187
188 dataframe = pandas.concat(list(partitions), axis=axis, copy=False)
--> 189 result = func(dataframe, **kwargs)
190 if isinstance(result, pandas.Series):
191 if num_splits == 1:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\engines\base\frame\data.py in _map_reduce_func()
652
653 def _map_reduce_func(df):
--> 654 series_result = func(df)
655 if axis == 0 and isinstance(series_result, pandas.Series):
656 # In the case of axis=0, we need to keep the shape of the data
~\AppData\Local\Continuum\anaconda3\lib\site-packages\modin\backends\pandas\query_compiler.py in <lambda>()
1111 else:
1112 new_modin_frame = self._modin_frame._apply_full_axis(
-> 1113 axis, lambda df: df.apply(func, axis=axis, *args, **kwargs)
1114 )
1115 return self.__constructor__(new_modin_frame)
TypeError: apply() got multiple values for argument 'axis'
|
TypeError
|
def read_sql(cls, sql, con, index_col=None, **kwargs):
"""Reads a SQL query or database table into a DataFrame.
Args:
sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
executed or a table name.
con: SQLAlchemy connectable (engine/connection) or database string URI or
DBAPI2 connection (fallback mode)
index_col: Column(s) to set as index(MultiIndex).
kwargs: Pass into pandas.read_sql function.
"""
if cls.read_sql_remote_task is None:
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
import sqlalchemy as sa
try:
import psycopg2 as pg
if isinstance(con, pg.extensions.connection):
con = "postgresql+psycopg2://{}:{}@{}{}/{}".format( # Table in DB
con.info.user, # <Username>: for DB
con.info.password, # Password for DB
con.info.host if con.info.host != "/tmp" else "", # @<Hostname>
(":" + str(con.info.port)) if con.info.host != "/tmp" else "", # <port>
con.info.dbname, # Table in DB
)
except ImportError:
pass
# In the case that we are given a SQLAlchemy Connection or Engine, the objects
# are not pickleable. We have to convert it to the URL string and connect from
# each of the workers.
if isinstance(con, (sa.engine.Engine, sa.engine.Connection)):
warnings.warn(
"To use parallel implementation of `read_sql`, pass the "
"connection string instead of {}.".format(type(con))
)
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
row_cnt_query = "SELECT COUNT(*) FROM ({}) as foo".format(sql)
row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()
cols_names_df = pandas.read_sql(
"SELECT * FROM ({}) as foo LIMIT 0".format(sql), con, index_col=index_col
)
cols_names = cols_names_df.columns
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
partition_ids = []
index_ids = []
limit = math.ceil(row_cnt / num_partitions)
for part in range(num_partitions):
offset = part * limit
query = "SELECT * FROM ({}) as foo LIMIT {} OFFSET {}".format(
sql, limit, offset
)
partition_id = cls.read_sql_remote_task._remote(
args=(num_partitions, query, con, index_col, kwargs),
num_return_vals=num_partitions + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None: # sum all lens returned from partitions
index_lens = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(index_lens))
else: # concat index returned from partitions
index_lst = [x for part_index in ray.get(index_ids) for x in part_index]
new_index = pandas.Index(index_lst).set_names(index_col)
new_frame = cls.frame_cls(np.array(partition_ids), new_index, cols_names)
new_frame._apply_index_objs(axis=0)
return cls.query_compiler_cls(new_frame)
|
def read_sql(cls, sql, con, index_col=None, **kwargs):
"""Reads a SQL query or database table into a DataFrame.
Args:
sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
executed or a table name.
con: SQLAlchemy connectable (engine/connection) or database string URI or
DBAPI2 connection (fallback mode)
index_col: Column(s) to set as index(MultiIndex).
kwargs: Pass into pandas.read_sql function.
"""
if cls.read_sql_remote_task is None:
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
import sqlalchemy as sa
# In the case that we are given a SQLAlchemy Connection or Engine, the objects
# are not pickleable. We have to convert it to the URL string and connect from
# each of the workers.
if isinstance(con, (sa.engine.Engine, sa.engine.Connection)):
warnings.warn(
"To use parallel implementation of `read_sql`, pass the "
"connection string instead of {}.".format(type(con))
)
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
row_cnt_query = "SELECT COUNT(*) FROM ({}) as foo".format(sql)
row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()
cols_names_df = pandas.read_sql(
"SELECT * FROM ({}) as foo LIMIT 0".format(sql), con, index_col=index_col
)
cols_names = cols_names_df.columns
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
partition_ids = []
index_ids = []
limit = math.ceil(row_cnt / num_partitions)
for part in range(num_partitions):
offset = part * limit
query = "SELECT * FROM ({}) as foo LIMIT {} OFFSET {}".format(
sql, limit, offset
)
partition_id = cls.read_sql_remote_task._remote(
args=(num_partitions, query, con, index_col, kwargs),
num_return_vals=num_partitions + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None: # sum all lens returned from partitions
index_lens = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(index_lens))
else: # concat index returned from partitions
index_lst = [x for part_index in ray.get(index_ids) for x in part_index]
new_index = pandas.Index(index_lst).set_names(index_col)
new_frame = cls.frame_cls(np.array(partition_ids), new_index, cols_names)
new_frame._apply_index_objs(axis=0)
return cls.query_compiler_cls(new_frame)
|
https://github.com/modin-project/modin/issues/729
|
In [1]: import pandas as pd
In [2]: import psycopg2
...:
...:
...: CONN = psycopg2.connect(dbname="tmpdb", user="xx",
...: password="xx", host="xx")
In [3]: df = pd.read_sql("SELECT time FROM tmpdb", CONN)
In [4]: import modin.pandas as pd
In [5]: df = pd.read_sql("SELECT time FROM tmpdb", CONN)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/stream_exit/lib/python3.6/site-packages/ray/worker.py in put_object(self, object_id, value)
382 try:
--> 383 self.store_and_register(object_id, value)
384 except pyarrow.PlasmaObjectExists:
~/stream_exit/lib/python3.6/site-packages/ray/worker.py in store_and_register(self, object_id, value, depth)
316 serialization_context=self.get_serialization_context(
--> 317 self.task_driver_id))
318 break
~/stream_exit/lib/python3.6/site-packages/ray/utils.py in _wrapper(*args, **kwargs)
478 with self.lock:
--> 479 return orig_attr(*args, **kwargs)
480
~/stream_exit/lib/python3.6/site-packages/ray/pyarrow_files/pyarrow/_plasma.pyx in pyarrow._plasma.PlasmaClient.put()
~/stream_exit/lib/python3.6/site-packages/ray/pyarrow_files/pyarrow/serialization.pxi in pyarrow.lib.serialize()
~/stream_exit/lib/python3.6/site-packages/ray/pyarrow_files/pyarrow/serialization.pxi in pyarrow.lib.SerializationContext._serialize_callback()
~/stream_exit/lib/python3.6/site-packages/ray/cloudpickle/cloudpickle.py in dumps(obj, protocol)
951 cp = CloudPickler(file, protocol=protocol)
--> 952 cp.dump(obj)
953 return file.getvalue()
~/stream_exit/lib/python3.6/site-packages/ray/cloudpickle/cloudpickle.py in dump(self, obj)
266 try:
--> 267 return Pickler.dump(self, obj)
268 except RuntimeError as e:
/usr/local/var/pyenv/versions/3.6.4/lib/python3.6/pickle.py in dump(self, obj)
408 self.framer.start_framing()
--> 409 self.save(obj)
410 self.write(STOP)
/usr/local/var/pyenv/versions/3.6.4/lib/python3.6/pickle.py in save(self, obj, save_persistent_id)
495 if reduce is not None:
--> 496 rv = reduce(self.proto)
497 else:
TypeError: can't pickle psycopg2.extensions.connection objects
|
TypeError
|
def astype(self, col_dtypes):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
dataframe with updated dtypes.
"""
columns = col_dtypes.keys()
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
# Update the new dtype series to the proper pandas dtype
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtypes[column] = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtypes[column] = np.dtype("float64")
# We cannot infer without computing the dtype if
elif isinstance(new_dtype, str) and new_dtype == "category":
new_dtypes = None
break
def astype_builder(df):
return df.astype({k: v for k, v in col_dtypes.items() if k in df})
new_frame = self._frame_mgr_cls.map_partitions(self._partitions, astype_builder)
return self.__constructor__(
new_frame,
self.index,
self.columns,
self._row_lengths,
self._column_widths,
new_dtypes,
)
|
def astype(self, col_dtypes):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
dataframe with updated dtypes.
"""
columns = col_dtypes.keys()
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
# Update the new dtype series to the proper pandas dtype
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
def astype_builder(df):
return df.astype({k: v for k, v in col_dtypes.items() if k in df})
new_frame = self._frame_mgr_cls.map_partitions(self._partitions, astype_builder)
return self.__constructor__(
new_frame,
self.index,
self.columns,
self._row_lengths,
self._column_widths,
new_dtypes,
)
|
https://github.com/modin-project/modin/issues/864
|
a = pd.Series(['A', 'A', 'B', 'B', 'A'])
a = a.astype('category')
a_df = a.to_frame()
a_df.select_dtypes('category')
Traceback (most recent call last):
File "/home/bar/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-19-15c3ee7df6bb>", line 1, in <module>
a_df.select_dtypes('category')
File "/home/bar/.local/lib/python3.6/site-packages/modin/pandas/dataframe.py", line 1412, in select_dtypes
is_dtype_instance_mapper, self.dtypes.iteritems()
File "/home/bar/.local/lib/python3.6/site-packages/modin/pandas/dataframe.py", line 1409, in is_dtype_instance_mapper
return column, functools.partial(issubclass, dtype.type)
AttributeError: 'str' object has no attribute 'type'
|
AttributeError
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
idx_name = by
if (
isinstance(self.axes[axis], pandas.MultiIndex)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif is_list_like(by):
if isinstance(by, Series):
idx_name = by.name
by = by.values
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
obj in self or (hasattr(self.index, "names") and obj in self.index.names)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
if by is None and level is not None and axis == 0:
if not isinstance(level, str):
by = self.axes[axis].names[level]
level = None
else:
by = level
level = None
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
**kwargs,
)
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
idx_name = by
if (
isinstance(self.axes[axis], pandas.MultiIndex)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif is_list_like(by):
if isinstance(by, Series):
idx_name = by.name
by = by.values
mismatch = len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
if mismatch and all(obj in self for obj in by):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
if by is None and level is not None and axis == 0:
if not isinstance(level, str):
by = self.axes[axis].names[level]
level = None
else:
by = level
level = None
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
**kwargs,
)
|
https://github.com/modin-project/modin/issues/783
|
In [1]: import modin.pandas
...: import pandas as pd
...:
...: # Working example with Pandas
...: csv1=pd.read_csv('test.dat',header=0,index_col=[0,1])
...: g1=csv1.groupby(['p0', 'p1'])
...:
...: # Testing with modin.pandas
...: csv2=modin.pandas.read_csv('test.dat',header=0,index_col=[0,1])
...: g2=csv2.groupby(['p0', 'p1'])
...:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-1-57918f70663a> in <module>()
8 # Testing with modin.pandas
9 csv2=modin.pandas.read_csv('test.dat',header=0,index_col=[0,1])
---> 10 g2=csv2.groupby(['p0', 'p1'])
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
370 pass
371 elif mismatch:
--> 372 raise KeyError(next(x for x in by if x not in self))
373
374 if by is None and level is not None and axis == 0:
KeyError: 'p0'
|
KeyError
|
def map(self, arg, na_action=None):
if not callable(arg) and hasattr(arg, "get"):
mapper = arg
def arg(s):
return mapper.get(s, np.nan)
return self.__constructor__(
query_compiler=self._query_compiler.applymap(
lambda s: arg(s) if not pandas.isnull(s) or na_action is None else s
)
)
|
def map(self, arg, na_action=None):
return self.__constructor__(query_compiler=self._query_compiler.applymap(arg))
|
https://github.com/modin-project/modin/issues/778
|
Traceback (most recent call last):
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-df805f17ff4c>", line 12, in <module>
df.loc[:, "column"] = df.loc[:, "column"].map(mapper)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/indexing.py", line 256, in __setitem__
super(_LocIndexer, self).__setitem__(row_lookup, col_lookup, item)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/indexing.py", line 145, in __setitem__
item = self._broadcast_item(row_lookup, col_lookup, item, to_shape)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/indexing.py", line 172, in _broadcast_item
item = np.array(item)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/series.py", line 123, in __array__
return super(Series, self).__array__(dtype).flatten()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/base.py", line 3145, in __array__
arr = self.to_numpy(dtype)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/series.py", line 1024, in to_numpy
return super(Series, self).to_numpy(dtype, copy).flatten()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/pandas/base.py", line 2946, in to_numpy
arr = self._query_compiler.to_numpy()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py", line 143, in to_numpy
arr = self._modin_frame.to_numpy()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/engines/base/frame/data.py", line 1137, in to_numpy
return self._frame_mgr_cls.to_numpy(self._partitions)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/engines/ray/generic/frame/partition_manager.py", line 20, in to_numpy
for row in partitions
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/ray/worker.py", line 2247, in get
raise value
ray.exceptions.RayTaskError: ray_worker (pid=26958, host=LH002609)
ray.exceptions.RayTaskError: ray_worker (pid=26965, host=LH002609)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/engines/ray/pandas_on_ray/frame/partition.py", line 197, in deploy_ray_func
result = func(partition, **kwargs)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/modin/data_management/functions/mapfunction.py", line 10, in <lambda>
lambda x: function(x, *args, **kwargs), *call_args, **call_kwds
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/pandas/core/frame.py", line 6979, in applymap
return self.apply(infer)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/pandas/core/frame.py", line 6913, in apply
return op.get_result()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/pandas/core/apply.py", line 186, in get_result
return self.apply_standard()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/pandas/core/apply.py", line 292, in apply_standard
self.apply_series_generator()
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/pandas/core/apply.py", line 321, in apply_series_generator
results[i] = self.f(v)
File "/home/jason/.virtualenvs/bert_poc/lib/python3.7/site-packages/pandas/core/frame.py", line 6977, in infer
return lib.map_infer(x.astype(object).values, func)
File "pandas/_libs/lib.pyx", line 2228, in pandas._libs.lib.map_infer
TypeError: ("'dict' object is not callable", 'occurred at index column')
|
ray.exceptions.RayTaskError
|
def _apply_full_axis(self, axis, func, new_index=None, new_columns=None, dtypes=None):
"""Perform a function across an entire axis.
Note: The data shape may change as a result of the function.
Args:
axis: The axis to apply over.
func: The function to apply.
new_index: (optional) The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns: (optional) The columns of the result. We may know this in
advance, and if not provided it must be computed.
dtypes: (optional) The data types of the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, func
)
# Index objects for new object creation. This is shorter than if..else
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
0, new_partitions, lambda df: df.index
)
# Length objects for new object creation. This is shorter than if..else
# This object determines the lengths and widths based on the given parameters
# and builds a dictionary used in the constructor below. 0 gives the row lengths
# and 1 gives the column widths. Since the dimension of `axis` given may have
# changed, we current just recompute it.
lengths_objs = {
axis: None,
axis ^ 1: [self._row_lengths, self._column_widths][axis ^ 1],
}
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series([np.dtype(dtypes)] * len(new_columns), index=new_columns)
return self.__constructor__(
new_partitions,
new_index,
new_columns,
lengths_objs[0],
lengths_objs[1],
dtypes,
)
|
def _apply_full_axis(self, axis, func, new_index=None, new_columns=None, dtypes=None):
"""Perform a function across an entire axis.
Note: The data shape may change as a result of the function.
Args:
axis: The axis to apply over.
func: The function to apply.
new_index: (optional) The index of the result. We may know this in advance,
and if not provided it must be computed.
new_columns: (optional) The columns of the result. We may know this in
advance, and if not provided it must be computed.
dtypes: (optional) The data types of the result. This is an optimization
because there are functions that always result in a particular data
type, and allows us to avoid (re)computing it.
Returns:
A new dataframe.
"""
new_partitions = self._frame_mgr_cls.map_axis_partitions(
axis, self._partitions, func
)
# Index objects for new object creation. This is shorter than if..else
if new_columns is None:
new_columns = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.columns
)
if new_index is None:
new_index = self._frame_mgr_cls.get_indices(
1, new_partitions, lambda df: df.index
)
# Length objects for new object creation. This is shorter than if..else
# This object determines the lengths and widths based on the given parameters
# and builds a dictionary used in the constructor below. 0 gives the row lengths
# and 1 gives the column widths. Since the dimension of `axis` given may have
# changed, we current just recompute it.
lengths_objs = {
axis: None,
axis ^ 1: [self._row_lengths, self._column_widths][axis ^ 1],
}
if dtypes == "copy":
dtypes = self._dtypes
elif dtypes is not None:
dtypes = pandas.Series([np.dtype(dtypes)] * len(new_columns), index=new_columns)
return self.__constructor__(
new_partitions,
new_index,
new_columns,
lengths_objs[0],
lengths_objs[1],
dtypes,
)
|
https://github.com/modin-project/modin/issues/758
|
p0 Index(['data0', 'data1', 'data2'], dtype='object')
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-6-704c3a79c97b> in <module>()
----> 1 df_g=csv2.groupby('p0').get_group('A')
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
353 elif isinstance(by, string_types):
354 idx_name = by
--> 355 by = self.__getitem__(by)._query_compiler
356 elif is_list_like(by):
357 if isinstance(by, Series):
~/.local/lib/python3.7/site-packages/modin/pandas/base.py in __getitem__(self, key)
3220 return self._getitem_slice(indexer)
3221 else:
-> 3222 return self._getitem(key)
3223
3224 def _getitem_slice(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem(self, key)
1835 else:
1836 print(key, self.columns)
-> 1837 return self._getitem_column(key)
1838
1839 def _getitem_column(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem_column(self, key)
1839 def _getitem_column(self, key):
1840 if key not in self.keys():
-> 1841 raise KeyError("{}".format(key))
1842 s = self._reduce_dimension(self._query_compiler.getitem_column_array([key]))
1843 s._parent = self
KeyError: 'p0'
|
KeyError
|
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = self._get_axis_number(axis) if axis is not None else 0
if level is not None:
if not isinstance(self.axes[axis], pandas.MultiIndex):
# error thrown by pandas
raise TypeError("Can only count levels on hierarchical columns.")
if isinstance(level, str):
level = self.axes[axis].names.index(level)
return self.groupby(level=level, axis=axis).count()
return self._reduce_dimension(
self._query_compiler.count(axis=axis, level=level, numeric_only=numeric_only)
)
|
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = self._get_axis_number(axis) if axis is not None else 0
if level is not None:
if not isinstance(self.axes[axis], pandas.MultiIndex):
# error thrown by pandas
raise TypeError("Can only count levels on hierarchical columns.")
if isinstance(level, str):
level = self.axes[axis].names.index(level)
new_names = dict(
zip(
range(len(self.axes[axis].levels[level])),
self.axes[axis].levels[level],
)
)
return (
self.groupby(self.axes[axis].codes[level], axis=axis)
.count()
.rename(new_names, axis=axis)
.rename_axis(self.axes[axis].names[level], axis=axis)
)
return self._reduce_dimension(
self._query_compiler.count(axis=axis, level=level, numeric_only=numeric_only)
)
|
https://github.com/modin-project/modin/issues/758
|
p0 Index(['data0', 'data1', 'data2'], dtype='object')
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-6-704c3a79c97b> in <module>()
----> 1 df_g=csv2.groupby('p0').get_group('A')
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
353 elif isinstance(by, string_types):
354 idx_name = by
--> 355 by = self.__getitem__(by)._query_compiler
356 elif is_list_like(by):
357 if isinstance(by, Series):
~/.local/lib/python3.7/site-packages/modin/pandas/base.py in __getitem__(self, key)
3220 return self._getitem_slice(indexer)
3221 else:
-> 3222 return self._getitem(key)
3223
3224 def _getitem_slice(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem(self, key)
1835 else:
1836 print(key, self.columns)
-> 1837 return self._getitem_column(key)
1838
1839 def _getitem_column(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem_column(self, key)
1839 def _getitem_column(self, key):
1840 if key not in self.keys():
-> 1841 raise KeyError("{}".format(key))
1842 s = self._reduce_dimension(self._query_compiler.getitem_column_array([key]))
1843 s._parent = self
KeyError: 'p0'
|
KeyError
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
idx_name = by
if (
isinstance(self.axes[axis], pandas.MultiIndex)
and by in self.axes[axis].names
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif is_list_like(by):
if isinstance(by, Series):
idx_name = by.name
by = by.values
mismatch = len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
if mismatch and all(obj in self for obj in by):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
if by is None and level is not None and axis == 0:
if not isinstance(level, str):
by = self.axes[axis].names[level]
level = None
else:
by = level
level = None
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
**kwargs,
)
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
idx_name = by
by = self.__getitem__(by)._query_compiler
elif is_list_like(by):
if isinstance(by, Series):
idx_name = by.name
by = by.values
mismatch = len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
if mismatch and all(obj in self for obj in by):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
**kwargs,
)
|
https://github.com/modin-project/modin/issues/758
|
p0 Index(['data0', 'data1', 'data2'], dtype='object')
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-6-704c3a79c97b> in <module>()
----> 1 df_g=csv2.groupby('p0').get_group('A')
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
353 elif isinstance(by, string_types):
354 idx_name = by
--> 355 by = self.__getitem__(by)._query_compiler
356 elif is_list_like(by):
357 if isinstance(by, Series):
~/.local/lib/python3.7/site-packages/modin/pandas/base.py in __getitem__(self, key)
3220 return self._getitem_slice(indexer)
3221 else:
-> 3222 return self._getitem(key)
3223
3224 def _getitem_slice(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem(self, key)
1835 else:
1836 print(key, self.columns)
-> 1837 return self._getitem_column(key)
1838
1839 def _getitem_column(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem_column(self, key)
1839 def _getitem_column(self, key):
1840 if key not in self.keys():
-> 1841 raise KeyError("{}".format(key))
1842 s = self._reduce_dimension(self._query_compiler.getitem_column_array([key]))
1843 s._parent = self
KeyError: 'p0'
|
KeyError
|
def _groupby_reduce(
self, map_func, reduce_func, drop=True, numeric_only=True, **kwargs
):
if self._is_multi_by or self._level is not None:
return self._default_to_pandas(map_func, **kwargs)
if not isinstance(self._by, type(self._query_compiler)):
return self._apply_agg_function(map_func, drop=drop, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
from .dataframe import DataFrame
return DataFrame(
query_compiler=groupby_qc.groupby_reduce(
self._by,
self._axis,
self._kwargs,
map_func,
kwargs,
reduce_func=reduce_func,
reduce_args=kwargs,
numeric_only=numeric_only,
)
)
|
def _groupby_reduce(
self, map_func, reduce_func, drop=True, numeric_only=True, **kwargs
):
if (
self._is_multi_by
or self._level is not None
or isinstance(self._df.axes[self._axis], pandas.MultiIndex)
):
return self._default_to_pandas(map_func, **kwargs)
if not isinstance(self._by, type(self._query_compiler)):
return self._apply_agg_function(map_func, drop=drop, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
from .dataframe import DataFrame
return DataFrame(
query_compiler=groupby_qc.groupby_reduce(
self._by,
self._axis,
self._kwargs,
map_func,
kwargs,
reduce_func=reduce_func,
reduce_args=kwargs,
numeric_only=numeric_only,
)
)
|
https://github.com/modin-project/modin/issues/758
|
p0 Index(['data0', 'data1', 'data2'], dtype='object')
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-6-704c3a79c97b> in <module>()
----> 1 df_g=csv2.groupby('p0').get_group('A')
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
353 elif isinstance(by, string_types):
354 idx_name = by
--> 355 by = self.__getitem__(by)._query_compiler
356 elif is_list_like(by):
357 if isinstance(by, Series):
~/.local/lib/python3.7/site-packages/modin/pandas/base.py in __getitem__(self, key)
3220 return self._getitem_slice(indexer)
3221 else:
-> 3222 return self._getitem(key)
3223
3224 def _getitem_slice(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem(self, key)
1835 else:
1836 print(key, self.columns)
-> 1837 return self._getitem_column(key)
1838
1839 def _getitem_column(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem_column(self, key)
1839 def _getitem_column(self, key):
1840 if key not in self.keys():
-> 1841 raise KeyError("{}".format(key))
1842 s = self._reduce_dimension(self._query_compiler.getitem_column_array([key]))
1843 s._parent = self
KeyError: 'p0'
|
KeyError
|
def _apply_agg_function(self, f, drop=True, **kwargs):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "'{0}' object is not callable".format(type(f))
from .dataframe import DataFrame
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
if self._is_multi_by or self._level is not None:
return self._default_to_pandas(f, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
new_manager = groupby_qc.groupby_agg(by, self._axis, f, self._kwargs, kwargs)
if self._idx_name is not None and self._as_index:
new_manager.index.name = self._idx_name
return DataFrame(query_compiler=new_manager)
|
def _apply_agg_function(self, f, drop=True, **kwargs):
"""Perform aggregation and combine stages based on a given function.
Args:
f: The function to apply to each group.
Returns:
A new combined DataFrame with the result of all groups.
"""
assert callable(f), "'{0}' object is not callable".format(type(f))
from .dataframe import DataFrame
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze()
else:
by = self._by
if (
self._is_multi_by
or self._level is not None
or isinstance(self._index, pandas.MultiIndex)
):
return self._default_to_pandas(f, **kwargs)
# For aggregations, pandas behavior does this for the result.
# For other operations it does not, so we wait until there is an aggregation to
# actually perform this operation.
if self._idx_name is not None and drop:
groupby_qc = self._query_compiler.drop(columns=[self._idx_name])
else:
groupby_qc = self._query_compiler
new_manager = groupby_qc.groupby_agg(by, self._axis, f, self._kwargs, kwargs)
if self._idx_name is not None and self._as_index:
new_manager.index.name = self._idx_name
return DataFrame(query_compiler=new_manager)
|
https://github.com/modin-project/modin/issues/758
|
p0 Index(['data0', 'data1', 'data2'], dtype='object')
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-6-704c3a79c97b> in <module>()
----> 1 df_g=csv2.groupby('p0').get_group('A')
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
353 elif isinstance(by, string_types):
354 idx_name = by
--> 355 by = self.__getitem__(by)._query_compiler
356 elif is_list_like(by):
357 if isinstance(by, Series):
~/.local/lib/python3.7/site-packages/modin/pandas/base.py in __getitem__(self, key)
3220 return self._getitem_slice(indexer)
3221 else:
-> 3222 return self._getitem(key)
3223
3224 def _getitem_slice(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem(self, key)
1835 else:
1836 print(key, self.columns)
-> 1837 return self._getitem_column(key)
1838
1839 def _getitem_column(self, key):
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in _getitem_column(self, key)
1839 def _getitem_column(self, key):
1840 if key not in self.keys():
-> 1841 raise KeyError("{}".format(key))
1842 s = self._reduce_dimension(self._query_compiler.getitem_column_array([key]))
1843 s._parent = self
KeyError: 'p0'
|
KeyError
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
if callable(by):
by = self.index.map(by)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by)._query_compiler
elif is_list_like(by):
if isinstance(by, Series):
idx_name = by.name
by = by.values
mismatch = len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
if mismatch and all(obj in self for obj in by):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
**kwargs,
)
|
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs,
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = self._get_axis_number(axis)
idx_name = None
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by)._query_compiler
elif is_list_like(by):
if isinstance(by, Series):
idx_name = by.name
by = by.values
mismatch = len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
if mismatch and all(obj in self for obj in by):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
**kwargs,
)
|
https://github.com/modin-project/modin/issues/756
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-16-e142f2bb04bf> in <module>()
----> 1 g2=csv2.groupby(by=lambda x:x[1]<3)
~/.local/lib/python3.7/site-packages/modin/pandas/dataframe.py in groupby(self, by, axis, level, as_index, sort, group_keys, squeeze, observed, **kwargs)
350 idx_name = None
351 if callable(by):
--> 352 by = by(self.index)
353 elif isinstance(by, string_types):
354 idx_name = by
<ipython-input-16-e142f2bb04bf> in <lambda>(x)
----> 1 g2=csv2.groupby(by=lambda x:x[1]<3)
TypeError: '<' not supported between instances of 'tuple' and 'int'
|
TypeError
|
def _scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(axis, self._prepare_method(list_like_op))
if axis == 1 and isinstance(scalar, pandas.Series):
new_columns = self.columns.union(
[label for label in scalar.index if label not in self.columns]
)
else:
new_columns = self.columns
return self.__constructor__(new_data, self.index, new_columns)
else:
return self._map_partitions(self._prepare_method(func))
|
def _scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(axis, self._prepare_method(list_like_op))
return self.__constructor__(new_data, self.index, self.columns)
else:
return self._map_partitions(self._prepare_method(func))
|
https://github.com/modin-project/modin/issues/708
|
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
~/Documents/modin/venv/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
~/Documents/modin/venv/lib/python3.6/site-packages/IPython/lib/pretty.py in pretty(self, obj)
400 if cls is not object \
401 and callable(cls.__dict__.get('__repr__')):
--> 402 return _repr_pprint(obj, self, cycle)
403
404 return _default_pprint(obj, self, cycle)
~/Documents/modin/venv/lib/python3.6/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
695 """A pprint that just redirects to the normal repr function."""
696 # Find newlines and replace them with p.break_()
--> 697 output = repr(obj)
698 for idx,output_line in enumerate(output.splitlines()):
699 if idx:
~/Documents/modin/modin/pandas/dataframe.py in __repr__(self)
91 num_cols = pandas.get_option("max_columns") or 20
92
---> 93 result = repr(self._build_repr_df(num_rows, num_cols))
94 if len(self.index) > num_rows or len(self.columns) > num_cols:
95 # The split here is so that we don't repr pandas row lengths.
~/Documents/modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
59
60 if not hasattr(self, "columns") or len(self.columns) <= num_cols:
---> 61 head_front = head.to_pandas()
62 # Creating these empty to make the concat logic simpler
63 head_back = pandas.DataFrame()
~/Documents/modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
507 else:
508 ErrorMessage.catch_bugs_and_request_email(
--> 509 len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
510 )
511 df.index = self.index
~/Documents/modin/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
36 if failure_condition:
37 raise Exception(
---> 38 "Internal Error. "
39 "Please email bug_reports@modin.org with the traceback and command that"
40 " caused this error."
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
~/Documents/modin/venv/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~/Documents/modin/modin/pandas/dataframe.py in _repr_html_(self)
112 # We use pandas _repr_html_ to get a string of the HTML representation
113 # of the dataframe.
--> 114 result = self._build_repr_df(num_rows, num_cols)._repr_html_()
115 if len(self.index) > num_rows or len(self.columns) > num_cols:
116 # We split so that we insert our correct dataframe dimensions.
~/Documents/modin/modin/pandas/base.py in _build_repr_df(self, num_rows, num_cols)
59
60 if not hasattr(self, "columns") or len(self.columns) <= num_cols:
---> 61 head_front = head.to_pandas()
62 # Creating these empty to make the concat logic simpler
63 head_back = pandas.DataFrame()
~/Documents/modin/modin/backends/pandas/query_compiler.py in to_pandas(self)
507 else:
508 ErrorMessage.catch_bugs_and_request_email(
--> 509 len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
510 )
511 df.index = self.index
~/Documents/modin/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
36 if failure_condition:
37 raise Exception(
---> 38 "Internal Error. "
39 "Please email bug_reports@modin.org with the traceback and command that"
40 " caused this error."
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def read_sql(cls, sql, con, index_col=None, **kwargs):
"""Reads a SQL query or database table into a DataFrame.
Args:
sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
executed or a table name.
con: SQLAlchemy connectable (engine/connection) or database string URI or
DBAPI2 connection (fallback mode)
index_col: Column(s) to set as index(MultiIndex).
kwargs: Pass into pandas.read_sql function.
"""
if cls.read_sql_remote_task is None:
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
import sqlalchemy as sa
# In the case that we are given a SQLAlchemy Connection or Engine, the objects
# are not pickleable. We have to convert it to the URL string and connect from
# each of the workers.
if isinstance(con, (sa.engine.Engine, sa.engine.Connection)):
con = repr(con.engine.url)
row_cnt_query = "SELECT COUNT(*) FROM ({}) as foo".format(sql)
row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()
cols_names_df = pandas.read_sql(
"SELECT * FROM ({}) as foo LIMIT 0".format(sql), con, index_col=index_col
)
cols_names = cols_names_df.columns
num_parts = cls.frame_mgr_cls._compute_num_partitions()
partition_ids = []
index_ids = []
limit = math.ceil(row_cnt / num_parts)
for part in range(num_parts):
offset = part * limit
query = "SELECT * FROM ({}) as foo LIMIT {} OFFSET {}".format(
sql, limit, offset
)
partition_id = cls.read_sql_remote_task._remote(
args=(num_parts, query, con, index_col, kwargs),
num_return_vals=num_parts + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None: # sum all lens returned from partitions
index_lens = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(index_lens))
else: # concat index returned from partitions
index_lst = [x for part_index in ray.get(index_ids) for x in part_index]
new_index = pandas.Index(index_lst).set_names(index_col)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names
)
return new_query_compiler
|
def read_sql(cls, sql, con, index_col=None, **kwargs):
"""Reads a SQL query or database table into a DataFrame.
Args:
sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
executed or a table name.
con: SQLAlchemy connectable (engine/connection) or database string URI or
DBAPI2 connection (fallback mode)
index_col: Column(s) to set as index(MultiIndex).
kwargs: Pass into pandas.read_sql function.
"""
if cls.read_sql_remote_task is None:
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
row_cnt_query = "SELECT COUNT(*) FROM ({} as foo)".format(sql)
row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()
cols_names_df = pandas.read_sql(
"SELECT * FROM ({} as foo) LIMIT 0".format(sql), con, index_col=index_col
)
cols_names = cols_names_df.columns
num_parts = cls.frame_mgr_cls._compute_num_partitions()
partition_ids = []
index_ids = []
limit = math.ceil(row_cnt / num_parts)
for part in range(num_parts):
offset = part * limit
query = "SELECT * FROM ({} as foo) LIMIT {} OFFSET {}".format(
sql, limit, offset
)
partition_id = cls.read_sql_remote_task._remote(
args=(num_parts, query, con, index_col, kwargs),
num_return_vals=num_parts + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None: # sum all lens returned from partitions
index_lens = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(index_lens))
else: # concat index returned from partitions
index_lst = [x for part_index in ray.get(index_ids) for x in part_index]
new_index = pandas.Index(index_lst).set_names(index_col)
new_query_compiler = cls.query_compiler_cls(
cls.frame_mgr_cls(np.array(partition_ids)), new_index, cols_names
)
return new_query_compiler
|
https://github.com/modin-project/modin/issues/711
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 383, in put_object
self.store_and_register(object_id, value)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 317, in store_and_register
self.task_driver_id))
File "/usr/local/lib/python3.7/site-packages/ray/utils.py", line 475, in _wrapper
return orig_attr(*args, **kwargs)
File "pyarrow/_plasma.pyx", line 496, in pyarrow._plasma.PlasmaClient.put
File "pyarrow/serialization.pxi", line 355, in pyarrow.lib.serialize
File "pyarrow/serialization.pxi", line 150, in pyarrow.lib.SerializationContext._serialize_callback
File "/usr/local/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle.py", line 952, in dumps
cp.dump(obj)
File "/usr/local/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle.py", line 267, in dump
return Pickler.dump(self, obj)
File "/usr/local/lib/python3.7/pickle.py", line 437, in dump
self.save(obj)
File "/usr/local/lib/python3.7/pickle.py", line 524, in save
rv = reduce(self.proto)
TypeError: can't pickle _thread.RLock objects
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "src/main.py", line 55, in <module>
start_date=start_date)
File "/src/preparation/data_source.py", line 16, in import_data
data = self._load_data(app_ids, granularity, f'{date.today()}_{self._source}_{granularity}.csv')
File "/src/preparation/data_source.py", line 27, in _load_data
data = pd.read_sql(query, conn, parse_dates=['install_date'])
File "/usr/local/lib/python3.7/site-packages/modin/pandas/io.py", line 324, in read_sql
return DataFrame(query_compiler=BaseFactory.read_sql(**kwargs))
File "/usr/local/lib/python3.7/site-packages/modin/data_management/factories.py", line 155, in read_sql
return cls._determine_engine()._read_sql(**kwargs)
File "/usr/local/lib/python3.7/site-packages/modin/data_management/factories.py", line 159, in _read_sql
return cls.io_cls.read_sql(**kwargs)
File "/usr/local/lib/python3.7/site-packages/modin/engines/ray/generic/io.py", line 805, in read_sql
num_return_vals=num_parts + 1,
File "/usr/local/lib/python3.7/site-packages/ray/remote_function.py", line 152, in _remote
return invocation(args, kwargs)
File "/usr/local/lib/python3.7/site-packages/ray/remote_function.py", line 143, in invocation
resources=resources)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 636, in submit_task
args_for_raylet.append(put(arg))
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 2227, in put
worker.put_object(object_id, value)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 403, in put_object
self.store_and_register(object_id, value)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 317, in store_and_register
self.task_driver_id))
File "/usr/local/lib/python3.7/site-packages/ray/utils.py", line 475, in _wrapper
return orig_attr(*args, **kwargs)
File "pyarrow/_plasma.pyx", line 496, in pyarrow._plasma.PlasmaClient.put
File "pyarrow/serialization.pxi", line 355, in pyarrow.lib.serialize
File "pyarrow/serialization.pxi", line 150, in pyarrow.lib.SerializationContext._serialize_callback
File "/usr/local/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle.py", line 952, in dumps
cp.dump(obj)
File "/usr/local/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle.py", line 267, in dump
return Pickler.dump(self, obj)
File "/usr/local/lib/python3.7/pickle.py", line 437, in dump
self.save(obj)
File "/usr/local/lib/python3.7/pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "/usr/local/lib/python3.7/pickle.py", line 662, in save_reduce
save(state)
File "/usr/local/lib/python3.7/pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.7/pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "/usr/local/lib/python3.7/pickle.py", line 882, in _batch_setitems
save(v)
File "/usr/local/lib/python3.7/pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "/usr/local/lib/python3.7/pickle.py", line 662, in save_reduce
save(state)
File "/usr/local/lib/python3.7/pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.7/pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "/usr/local/lib/python3.7/pickle.py", line 882, in _batch_setitems
save(v)
File "/usr/local/lib/python3.7/pickle.py", line 549, in save
self.save_reduce(obj=obj, *rv)
File "/usr/local/lib/python3.7/pickle.py", line 662, in save_reduce
save(state)
File "/usr/local/lib/python3.7/pickle.py", line 504, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/local/lib/python3.7/pickle.py", line 856, in save_dict
self._batch_setitems(obj.items())
File "/usr/local/lib/python3.7/pickle.py", line 882, in _batch_setitems
save(v)
File "/usr/local/lib/python3.7/pickle.py", line 524, in save
rv = reduce(self.proto)
TypeError: can't pickle _thread._local objects
|
TypeError
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
by=None,
):
"""Sort a DataFrame by one of the indices (columns or index).
Args:
axis: The axis to sort over.
level: The MultiIndex level to sort over.
ascending: Ascending or descending
inplace: Whether or not to update this DataFrame inplace.
kind: How to perform the sort.
na_position: Where to position NA on the sort.
sort_remaining: On Multilevel Index sort based on all levels.
by: (Deprecated) argument to pass to sort_values.
Returns:
A sorted DataFrame
"""
axis = self._get_axis_number(axis)
if level is not None or (
(axis == 0 and isinstance(self.index, pandas.MultiIndex))
or (axis == 1 and isinstance(self.columns, pandas.MultiIndex))
):
new_query_compiler = self._default_to_pandas(
"sort_index",
axis=axis,
level=level,
ascending=ascending,
inplace=False,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
)._query_compiler
return self._create_or_update_from_compiler(new_query_compiler, inplace)
if by is not None:
warnings.warn(
"by argument to sort_index is deprecated, please use .sort_values(by=...)",
FutureWarning,
stacklevel=2,
)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
new_query_compiler = self._query_compiler.sort_index(
axis=axis, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_inplace(new_query_compiler=new_query_compiler)
else:
return self.__constructor__(query_compiler=new_query_compiler)
|
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
by=None,
):
"""Sort a DataFrame by one of the indices (columns or index).
Args:
axis: The axis to sort over.
level: The MultiIndex level to sort over.
ascending: Ascending or descending
inplace: Whether or not to update this DataFrame inplace.
kind: How to perform the sort.
na_position: Where to position NA on the sort.
sort_remaining: On Multilevel Index sort based on all levels.
by: (Deprecated) argument to pass to sort_values.
Returns:
A sorted DataFrame
"""
axis = self._get_axis_number(axis)
if level is not None:
new_query_compiler = self._default_to_pandas(
"sort_index",
axis=axis,
level=level,
ascending=ascending,
inplace=False,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
if by is not None:
warnings.warn(
"by argument to sort_index is deprecated, please use .sort_values(by=...)",
FutureWarning,
stacklevel=2,
)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
new_query_compiler = self._query_compiler.sort_index(
axis=axis, ascending=ascending, kind=kind, na_position=na_position
)
if inplace:
self._update_inplace(new_query_compiler=new_query_compiler)
else:
return self.__constructor__(query_compiler=new_query_compiler)
|
https://github.com/modin-project/modin/issues/698
|
NotImplementedError Traceback (most recent call last)
<ipython-input-7-06ffb9961dfc> in <module>
----> 1 hh.sort_index(axis=0)
~/.pyenv/versions/3.7.3/envs/env/lib/python3.7/site-packages/modin/pandas/base.py in sort_index(self, axis, level, ascending, inplace, kind, na_position, sort_remaining, by)
2570 return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
2571 new_query_compiler = self._query_compiler.sort_index(
-> 2572 axis=axis, ascending=ascending, kind=kind, na_position=na_position
2573 )
2574 if inplace:
~/.pyenv/versions/3.7.3/envs/env/lib/python3.7/site-packages/modin/backends/pandas/query_compiler.py in sort_index(self, **kwargs)
1762 new_index = self.index
1763 else:
-> 1764 new_index = pandas.Series(self.index).sort_values(**kwargs)
1765 new_columns = self.columns
1766 return self.__constructor__(
~/.pyenv/versions/3.7.3/envs/env/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
175
176 if isinstance(data, MultiIndex):
--> 177 raise NotImplementedError("initializing a Series from a "
178 "MultiIndex is not supported")
179 elif isinstance(data, Index):
NotImplementedError: initializing a Series from a MultiIndex is not supported
|
NotImplementedError
|
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the data within the blocks.
new_manager = self.__constructor__(
new_data, self.columns, self.index, is_transposed=self._is_transposed ^ 1
)
return new_manager
|
def transpose(self, *args, **kwargs):
"""Transposes this DataManager.
Returns:
Transposed new DataManager.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the
new_manager = self.__constructor__(
new_data, self.columns, self.index, is_transposed=self._is_transposed ^ 1
)
return new_manager
|
https://github.com/modin-project/modin/issues/646
|
Traceback (most recent call last):
File "modin_operations.py", line 39, in <module>
measure_operations(args.mode)
File "modin_operations.py", line 31, in measure_operations
measure_time(read_operation, pd, filename)
File "modin_operations.py", line 7, in measure_time
ret_val = func(*args)
File "modin_operations.py", line 15, in read_operation
print(df.memory_usage())
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 3263, in __str__
return repr(self)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/series.py", line 213, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 61, in _build_repr_df
head_front = head.to_pandas()
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 487, in to_pandas
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/error_message.py", line 38, in catch_bugs_and_request_email
"Internal Error. "
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
A new QueryCompiler object containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(**kwargs)
def sum_memory_usage(df):
return df.sum()
map_func = self._build_mapreduce_func(memory_usage_builder, **kwargs)
reduce_func = self._build_mapreduce_func(sum_memory_usage)
return self._full_reduce(0, map_func, reduce_func)
|
def memory_usage(self, **kwargs):
"""Returns the memory usage of each column.
Returns:
A new QueryCompiler object containing the memory usage of each column.
"""
def memory_usage_builder(df, **kwargs):
return df.memory_usage(**kwargs)
func = self._build_mapreduce_func(memory_usage_builder, **kwargs)
return self._full_axis_reduce(0, func)
|
https://github.com/modin-project/modin/issues/646
|
Traceback (most recent call last):
File "modin_operations.py", line 39, in <module>
measure_operations(args.mode)
File "modin_operations.py", line 31, in measure_operations
measure_time(read_operation, pd, filename)
File "modin_operations.py", line 7, in measure_time
ret_val = func(*args)
File "modin_operations.py", line 15, in read_operation
print(df.memory_usage())
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 3263, in __str__
return repr(self)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/series.py", line 213, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 61, in _build_repr_df
head_front = head.to_pandas()
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 487, in to_pandas
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/error_message.py", line 38, in catch_bugs_and_request_email
"Internal Error. "
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def _get_partitions(self):
if not self._filtered_empties or (
self._lengths_cache is not None and self._widths_cache is not None
):
self._partitions_cache = np.array(
[
row
for row in [
[
self._partitions_cache[i][j]
for j in range(len(self._partitions_cache[i]))
if self.block_lengths[i] != 0 and self.block_widths[j] != 0
]
for i in range(len(self._partitions_cache))
]
if len(row)
]
)
self._remove_empty_blocks()
self._filtered_empties = True
return self._partitions_cache
|
def _get_partitions(self):
if (
not self._filtered_empties
and self._lengths_cache is not None
and self._widths_cache is not None
):
self._partitions_cache = np.array(
[
row
for row in [
[
self._partitions_cache[i][j]
for j in range(len(self._partitions_cache[i]))
if self.block_lengths[i] != 0 and self.block_widths[j] != 0
]
for i in range(len(self._partitions_cache))
]
if len(row)
]
)
self._remove_empty_blocks()
self._filtered_empties = True
return self._partitions_cache
|
https://github.com/modin-project/modin/issues/646
|
Traceback (most recent call last):
File "modin_operations.py", line 39, in <module>
measure_operations(args.mode)
File "modin_operations.py", line 31, in measure_operations
measure_time(read_operation, pd, filename)
File "modin_operations.py", line 7, in measure_time
ret_val = func(*args)
File "modin_operations.py", line 15, in read_operation
print(df.memory_usage())
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 3263, in __str__
return repr(self)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/series.py", line 213, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 61, in _build_repr_df
head_front = head.to_pandas()
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 487, in to_pandas
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/error_message.py", line 38, in catch_bugs_and_request_email
"Internal Error. "
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def __repr__(self):
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
temp_df = self._build_repr_df(num_rows, num_cols)
if isinstance(temp_df, pandas.DataFrame):
temp_df = temp_df.iloc[:, 0]
temp_str = repr(temp_df)
if self.name is not None:
name_str = "Name: {}, ".format(str(self.name))
else:
name_str = ""
if len(self.index) > num_rows:
len_str = "Length: {}, ".format(len(self.index))
else:
len_str = ""
dtype_str = "dtype: {}".format(temp_str.rsplit("dtype: ", 1)[-1])
if len(self) == 0:
return "Series([], {}{}".format(name_str, dtype_str)
return temp_str.rsplit("\nName:", 1)[0] + "\n{}{}{}".format(
name_str, len_str, dtype_str
)
|
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
temp_df = self._build_repr_df(num_rows, num_cols)
if isinstance(temp_df, pandas.DataFrame):
temp_df = temp_df.iloc[:, 0]
temp_str = repr(temp_df)
if self.name is not None:
name_str = "Name: {}, ".format(str(self.name))
else:
name_str = ""
if len(self.index) > num_rows:
len_str = "Length: {}, ".format(len(self.index))
else:
len_str = ""
dtype_str = "dtype: {}".format(temp_str.rsplit("dtype: ", 1)[-1])
if len(self) == 0:
return "Series([], {}{}".format(name_str, dtype_str)
return temp_str.rsplit("\nName:", 1)[0] + "\n{}{}{}".format(
name_str, len_str, dtype_str
)
|
https://github.com/modin-project/modin/issues/646
|
Traceback (most recent call last):
File "modin_operations.py", line 39, in <module>
measure_operations(args.mode)
File "modin_operations.py", line 31, in measure_operations
measure_time(read_operation, pd, filename)
File "modin_operations.py", line 7, in measure_time
ret_val = func(*args)
File "modin_operations.py", line 15, in read_operation
print(df.memory_usage())
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 3263, in __str__
return repr(self)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/series.py", line 213, in __repr__
temp_df = self._build_repr_df(num_rows, num_cols)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/pandas/base.py", line 61, in _build_repr_df
head_front = head.to_pandas()
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/backends/pandas/query_compiler.py", line 487, in to_pandas
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
File "/Users/krutivanatwala/Documents/python-experiments/venv/lib/python3.6/site-packages/modin/error_message.py", line 38, in catch_bugs_and_request_email
"Internal Error. "
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
new_columns = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
.columns
)
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
include = kwargs.get("include", None)
if len(new_columns) != 0 and include is not None:
if not isinstance(include, np.dtype) and include == "all":
new_columns = self.columns
else:
new_columns = self.dtypes[
[
any(
(isinstance(inc, np.dtype) and inc == d)
or (
not isinstance(inc, np.dtype)
and inc.__subclasscheck__(getattr(np, d.__str__()))
)
for inc in include
)
for d in self.dtypes.values
]
].index
elif len(new_columns) == 0:
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
exclude = kwargs.get("exclude", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if isinstance(exclude, list):
exclude.extend(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
# Update `new_columns` to reflect the included types
new_columns = self.dtypes[~self.dtypes.isin(exclude)].index
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
|
https://github.com/modin-project/modin/issues/520
|
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
~/.local/lib/python3.6/site-packages/IPython/lib/pretty.py in pretty(self, obj)
400 if cls is not object \
401 and callable(cls.__dict__.get('__repr__')):
--> 402 return _repr_pprint(obj, self, cycle)
403
404 return _default_pprint(obj, self, cycle)
~/.local/lib/python3.6/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
695 """A pprint that just redirects to the normal repr function."""
696 # Find newlines and replace them with p.break_()
--> 697 output = repr(obj)
698 for idx,output_line in enumerate(output.splitlines()):
699 if idx:
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/pandas/dataframe.py in __repr__(self)
136 num_cols = 30
137
--> 138 result = repr(self._build_repr_df(num_rows, num_cols))
139 if len(self.index) > num_rows or len(self.columns) > num_cols:
140 # The split here is so that we don't repr pandas row lengths.
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/pandas/dataframe.py in _build_repr_df(self, num_rows, num_cols)
107
108 if len(self.columns) <= num_cols:
--> 109 head_front = head.to_pandas()
110 # Creating these empty to make the concat logic simpler
111 head_back = pandas.DataFrame()
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in to_pandas(self)
480 else:
481 ErrorMessage.catch_bugs_and_request_email(
--> 482 len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
483 )
484 df.index = self.index
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
36 if failure_condition:
37 raise Exception(
---> 38 "Internal Error. "
39 "Please email bug_reports@modin.org with the traceback and command that"
40 " caused this error."
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/pandas/dataframe.py in _repr_html_(self)
158 # We use pandas _repr_html_ to get a string of the HTML representation
159 # of the dataframe.
--> 160 result = self._build_repr_df(num_rows, num_cols)._repr_html_()
161 if len(self.index) > num_rows or len(self.columns) > num_cols:
162 # We split so that we insert our correct dataframe dimensions.
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/pandas/dataframe.py in _build_repr_df(self, num_rows, num_cols)
107
108 if len(self.columns) <= num_cols:
--> 109 head_front = head.to_pandas()
110 # Creating these empty to make the concat logic simpler
111 head_back = pandas.DataFrame()
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in to_pandas(self)
480 else:
481 ErrorMessage.catch_bugs_and_request_email(
--> 482 len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
483 )
484 df.index = self.index
~/.conda/envs/deepdream/lib/python3.6/site-packages/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
36 if failure_condition:
37 raise Exception(
---> 38 "Internal Error. "
39 "Please email bug_reports@modin.org with the traceback and command that"
40 " caused this error."
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this err
|
Exception
|
def squeeze(self, ndim=0, axis=None):
to_squeeze = self.to_pandas()
# This is the case for 1xN or Nx1 DF - Need to call squeeze
if ndim == 1:
if axis is None:
axis = 0 if self.data.shape[1] > 1 else 1
squeezed = pandas.Series(to_squeeze.squeeze())
# In the case of `MultiIndex`, we already have the correct index and naming
# because we are going from pandas above. This step is to correct the
# `Series` to have the correct name and index.
if not isinstance(squeezed.index, pandas.MultiIndex):
scaler_axis = self.columns if axis else self.index
non_scaler_axis = self.index if axis else self.columns
squeezed.name = scaler_axis[0]
squeezed.index = non_scaler_axis
return squeezed
# This is the case for a 1x1 DF - We don't need to squeeze
else:
return to_squeeze.values[0][0]
|
def squeeze(self, ndim=0, axis=None):
to_squeeze = self.data.to_pandas()
# This is the case for 1xN or Nx1 DF - Need to call squeeze
if ndim == 1:
if axis is None:
axis = 0 if self.data.shape[1] > 1 else 1
squeezed = pandas.Series(to_squeeze.squeeze(axis))
scaler_axis = self.columns if axis else self.index
non_scaler_axis = self.index if axis else self.columns
squeezed.name = scaler_axis[0]
squeezed.index = non_scaler_axis
return squeezed
# This is the case for a 1x1 DF - We don't need to squeeze
else:
return to_squeeze.values[0][0]
|
https://github.com/modin-project/modin/issues/504
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-f7a945237ab9> in <module>()
3 header=[0,1,2,3], index_col=0)
4 DF2.loc[1]
----> 5 DF2.loc[1, 'Presidents']
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, key)
232 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
233 ndim = self._expand_dim(row_lookup, col_lookup, ndim)
--> 234 result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
235 return result
236
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, row_lookup, col_lookup, ndim)
163 single_axis = 1 if self.col_scaler else 0
164 return SeriesView(
--> 165 qc_view.squeeze(ndim=1, axis=single_axis),
166 self.df,
167 (row_lookup, col_lookup),
~/anaconda3/lib/python3.7/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in squeeze(self, ndim, axis)
2781 if axis is None:
2782 axis = 0 if self.data.shape[1] > 1 else 1
-> 2783 squeezed = pandas.Series(to_squeeze.squeeze(axis))
2784 scaler_axis = self.columns if axis else self.index
2785 non_scaler_axis = self.index if axis else self.columns
~/anaconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
260 else:
261 data = sanitize_array(data, index, dtype, copy,
--> 262 raise_cast_failure=True)
263
264 data = SingleBlockManager(data, index, fastpath=True)
~/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
658 raise Exception('Data must be 1-dimensional')
659 else:
--> 660 subarr = com.asarray_tuplesafe(data, dtype=dtype)
661
662 # This is to prevent mixed-type Series getting all casted to
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in asarray_tuplesafe(values, dtype)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in <listcomp>(.0)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
TypeError: 'int' object is not iterable
|
TypeError
|
def __getitem__(self, key):
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
self._handle_enlargement(row_loc, col_loc)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
ndim = (0 if len(row_lookup) == 1 else 1) + (0 if len(col_lookup) == 1 else 1)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
# Pandas drops the levels that are in the `loc`, so we have to as well.
if hasattr(result, "index") and isinstance(result.index, pandas.MultiIndex):
if (
isinstance(result, pandas.Series)
and not isinstance(col_loc, slice)
and all(col_loc[i] in result.index.levels[i] for i in range(len(col_loc)))
):
result.index = result.index.droplevel(list(range(len(col_loc))))
elif all(row_loc[i] in result.index.levels[i] for i in range(len(row_loc))):
result.index = result.index.droplevel(list(range(len(row_loc))))
if (
hasattr(result, "columns")
and isinstance(result.columns, pandas.MultiIndex)
and all(col_loc[i] in result.columns.levels[i] for i in range(len(col_loc)))
):
result.columns = result.columns.droplevel(list(range(len(col_loc))))
return result
|
def __getitem__(self, key):
row_loc, col_loc, ndim, self.row_scaler, self.col_scaler = _parse_tuple(key)
self._handle_enlargement(row_loc, col_loc)
row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
ndim = self._expand_dim(row_lookup, col_lookup, ndim)
result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
return result
|
https://github.com/modin-project/modin/issues/504
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-f7a945237ab9> in <module>()
3 header=[0,1,2,3], index_col=0)
4 DF2.loc[1]
----> 5 DF2.loc[1, 'Presidents']
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, key)
232 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
233 ndim = self._expand_dim(row_lookup, col_lookup, ndim)
--> 234 result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
235 return result
236
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, row_lookup, col_lookup, ndim)
163 single_axis = 1 if self.col_scaler else 0
164 return SeriesView(
--> 165 qc_view.squeeze(ndim=1, axis=single_axis),
166 self.df,
167 (row_lookup, col_lookup),
~/anaconda3/lib/python3.7/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in squeeze(self, ndim, axis)
2781 if axis is None:
2782 axis = 0 if self.data.shape[1] > 1 else 1
-> 2783 squeezed = pandas.Series(to_squeeze.squeeze(axis))
2784 scaler_axis = self.columns if axis else self.index
2785 non_scaler_axis = self.index if axis else self.columns
~/anaconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
260 else:
261 data = sanitize_array(data, index, dtype, copy,
--> 262 raise_cast_failure=True)
263
264 data = SingleBlockManager(data, index, fastpath=True)
~/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
658 raise Exception('Data must be 1-dimensional')
659 else:
--> 660 subarr = com.asarray_tuplesafe(data, dtype=dtype)
661
662 # This is to prevent mixed-type Series getting all casted to
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in asarray_tuplesafe(values, dtype)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in <listcomp>(.0)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
TypeError: 'int' object is not iterable
|
TypeError
|
def _compute_lookup(self, row_loc, col_loc) -> Tuple[pandas.Index, pandas.Index]:
if is_list_like(row_loc) and len(row_loc) == 1:
if (
isinstance(self.qc.index.values[0], np.datetime64)
and type(row_loc[0]) != np.datetime64
):
row_loc = [pandas.to_datetime(row_loc[0])]
if isinstance(row_loc, slice):
row_lookup = self.qc.index.to_series().loc[row_loc].values
elif isinstance(self.qc.index, pandas.MultiIndex):
row_lookup = self.qc.index[self.qc.index.get_locs(row_loc)]
else:
row_lookup = self.qc.index[self.qc.index.get_indexer_for(row_loc)]
if isinstance(col_loc, slice):
col_lookup = self.qc.columns.to_series().loc[col_loc].values
elif isinstance(self.qc.columns, pandas.MultiIndex):
col_lookup = self.qc.columns[self.qc.columns.get_locs(col_loc)]
else:
col_lookup = self.qc.columns[self.qc.columns.get_indexer_for(col_loc)]
return row_lookup, col_lookup
|
def _compute_lookup(self, row_loc, col_loc) -> Tuple[pandas.Index, pandas.Index]:
if isinstance(row_loc, list) and len(row_loc) == 1:
if (
isinstance(self.qc.index.values[0], np.datetime64)
and type(row_loc[0]) != np.datetime64
):
row_loc = [pandas.to_datetime(row_loc[0])]
row_lookup = self.qc.index.to_series().loc[row_loc].index
col_lookup = self.qc.columns.to_series().loc[col_loc].index
return row_lookup, col_lookup
|
https://github.com/modin-project/modin/issues/504
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-f7a945237ab9> in <module>()
3 header=[0,1,2,3], index_col=0)
4 DF2.loc[1]
----> 5 DF2.loc[1, 'Presidents']
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, key)
232 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
233 ndim = self._expand_dim(row_lookup, col_lookup, ndim)
--> 234 result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
235 return result
236
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, row_lookup, col_lookup, ndim)
163 single_axis = 1 if self.col_scaler else 0
164 return SeriesView(
--> 165 qc_view.squeeze(ndim=1, axis=single_axis),
166 self.df,
167 (row_lookup, col_lookup),
~/anaconda3/lib/python3.7/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in squeeze(self, ndim, axis)
2781 if axis is None:
2782 axis = 0 if self.data.shape[1] > 1 else 1
-> 2783 squeezed = pandas.Series(to_squeeze.squeeze(axis))
2784 scaler_axis = self.columns if axis else self.index
2785 non_scaler_axis = self.index if axis else self.columns
~/anaconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
260 else:
261 data = sanitize_array(data, index, dtype, copy,
--> 262 raise_cast_failure=True)
263
264 data = SingleBlockManager(data, index, fastpath=True)
~/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
658 raise Exception('Data must be 1-dimensional')
659 else:
--> 660 subarr = com.asarray_tuplesafe(data, dtype=dtype)
661
662 # This is to prevent mixed-type Series getting all casted to
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in asarray_tuplesafe(values, dtype)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in <listcomp>(.0)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
TypeError: 'int' object is not iterable
|
TypeError
|
def __getattribute__(self, item):
default_behaviors = [
"__init__",
"series",
"parent_df",
"_loc",
"__arithmetic_op__",
"__comparisons__",
"__class__",
"index",
"_get_index",
"_set_index",
]
if item not in default_behaviors:
method = self.series.__getattribute__(item)
# Certain operations like `at`, `loc`, `iloc`, etc. are callable because in
# pandas they are equivalent to classes. They are verified here because they
# cannot be overridden with the functions below. This generally solves the
# problem where the instance property is callable, but the class property is
# not.
# The isclass check is to ensure that we return the correct type. Some of
# the objects that are called result in classes being returned, and we don't
# want to override with our own function.
is_callable = (
callable(method)
and callable(getattr(type(self.series), item))
and not inspect.isclass(getattr(type(self.series), item))
)
try:
has_inplace_param = is_callable and "inplace" in str(
inspect.signature(method)
)
# This will occur on Python2
except AttributeError:
has_inplace_param = is_callable and "inplace" in str(
inspect.getargspec(method)
)
if is_callable and has_inplace_param and self.parent_df is not None:
def inplace_handler(*args, **kwargs):
"""Replaces the default behavior of methods with inplace kwarg.
Note: This method will modify the DataFrame this Series is attached
to when `inplace` is True. Instead of rewriting or overriding
every method that uses `inplace`, we use this handler.
This handler will first check that the keyword argument passed
for `inplace` is True, if not then it will just return the
result of the operation requested.
If `inplace` is True, do the operation, keeping track of the
previous length. This is because operations like `dropna` still
propagate back to the DataFrame that holds the Series.
If the length did not change, we propagate the inplace changes
of the operation back to the original DataFrame with
`__setitem__`.
If the length changed, we just need to do a `reindex` on the
parent DataFrame. This will propagate the inplace operation
(e.g. `dropna`) back to the parent DataFrame.
See notes in SeriesView class about when it is okay to return a
pandas Series vs a SeriesView.
Returns:
If `inplace` is True: None, else: A new Series.
"""
if kwargs.get("inplace", False):
prev_len = len(self.series)
self.series.__getattribute__(item)(*args, **kwargs)
if prev_len == len(self.series):
self.parent_df.loc[self._loc] = self.series
else:
self.parent_df.reindex(index=self.series.index, copy=False)
return None
else:
return self.series.__getattribute__(item)(*args, **kwargs)
# We replace the method with `inplace_handler` for inplace operations
method = inplace_handler
elif is_callable:
def other_handler(*args, **kwargs):
"""Replaces the method's args and kwargs with the Series object.
Note: This method is needed because sometimes operations like
`df['col0'].equals(df['col1'])` do not return the correct value.
This mostly has occurred in Python2, but overriding of the
method will make the behavior more deterministic for all calls.
Returns the result of `__getattribute__` from the Series this wraps.
"""
args = tuple(
arg if not isinstance(arg, SeriesView) else arg.series
for arg in args
)
kwargs = {
kw: arg if not isinstance(arg, SeriesView) else arg.series
for kw, arg in kwargs.items()
}
return self.series.__getattribute__(item)(*args, **kwargs)
method = other_handler
return method
# We need to do this hack for equality checking.
elif item == "__class__":
return self.series.__class__
else:
return object.__getattribute__(self, item)
|
def __getattribute__(self, item):
default_behaviors = [
"__init__",
"series",
"parent_df",
"_loc",
"__arithmetic_op__",
"__comparisons__",
"__class__",
]
if item not in default_behaviors:
method = self.series.__getattribute__(item)
# Certain operations like `at`, `loc`, `iloc`, etc. are callable because in
# pandas they are equivalent to classes. They are verified here because they
# cannot be overridden with the functions below. This generally solves the
# problem where the instance property is callable, but the class property is
# not.
# The isclass check is to ensure that we return the correct type. Some of
# the objects that are called result in classes being returned, and we don't
# want to override with our own function.
is_callable = (
callable(method)
and callable(getattr(type(self.series), item))
and not inspect.isclass(getattr(type(self.series), item))
)
try:
has_inplace_param = is_callable and "inplace" in str(
inspect.signature(method)
)
# This will occur on Python2
except AttributeError:
has_inplace_param = is_callable and "inplace" in str(
inspect.getargspec(method)
)
if is_callable and has_inplace_param and self.parent_df is not None:
def inplace_handler(*args, **kwargs):
"""Replaces the default behavior of methods with inplace kwarg.
Note: This method will modify the DataFrame this Series is attached
to when `inplace` is True. Instead of rewriting or overriding
every method that uses `inplace`, we use this handler.
This handler will first check that the keyword argument passed
for `inplace` is True, if not then it will just return the
result of the operation requested.
If `inplace` is True, do the operation, keeping track of the
previous length. This is because operations like `dropna` still
propagate back to the DataFrame that holds the Series.
If the length did not change, we propagate the inplace changes
of the operation back to the original DataFrame with
`__setitem__`.
If the length changed, we just need to do a `reindex` on the
parent DataFrame. This will propagate the inplace operation
(e.g. `dropna`) back to the parent DataFrame.
See notes in SeriesView class about when it is okay to return a
pandas Series vs a SeriesView.
Returns:
If `inplace` is True: None, else: A new Series.
"""
if kwargs.get("inplace", False):
prev_len = len(self.series)
self.series.__getattribute__(item)(*args, **kwargs)
if prev_len == len(self.series):
self.parent_df.loc[self._loc] = self.series
else:
self.parent_df.reindex(index=self.series.index, copy=False)
return None
else:
return self.series.__getattribute__(item)(*args, **kwargs)
# We replace the method with `inplace_handler` for inplace operations
method = inplace_handler
elif is_callable:
def other_handler(*args, **kwargs):
"""Replaces the method's args and kwargs with the Series object.
Note: This method is needed because sometimes operations like
`df['col0'].equals(df['col1'])` do not return the correct value.
This mostly has occurred in Python2, but overriding of the
method will make the behavior more deterministic for all calls.
Returns the result of `__getattribute__` from the Series this wraps.
"""
args = tuple(
arg if not isinstance(arg, SeriesView) else arg.series
for arg in args
)
kwargs = {
kw: arg if not isinstance(arg, SeriesView) else arg.series
for kw, arg in kwargs.items()
}
return self.series.__getattribute__(item)(*args, **kwargs)
method = other_handler
return method
# We need to do this hack for equality checking.
elif item == "__class__":
return self.series.__class__
else:
return object.__getattribute__(self, item)
|
https://github.com/modin-project/modin/issues/504
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-f7a945237ab9> in <module>()
3 header=[0,1,2,3], index_col=0)
4 DF2.loc[1]
----> 5 DF2.loc[1, 'Presidents']
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, key)
232 row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)
233 ndim = self._expand_dim(row_lookup, col_lookup, ndim)
--> 234 result = super(_LocIndexer, self).__getitem__(row_lookup, col_lookup, ndim)
235 return result
236
~/anaconda3/lib/python3.7/site-packages/modin/pandas/indexing.py in __getitem__(self, row_lookup, col_lookup, ndim)
163 single_axis = 1 if self.col_scaler else 0
164 return SeriesView(
--> 165 qc_view.squeeze(ndim=1, axis=single_axis),
166 self.df,
167 (row_lookup, col_lookup),
~/anaconda3/lib/python3.7/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in squeeze(self, ndim, axis)
2781 if axis is None:
2782 axis = 0 if self.data.shape[1] > 1 else 1
-> 2783 squeezed = pandas.Series(to_squeeze.squeeze(axis))
2784 scaler_axis = self.columns if axis else self.index
2785 non_scaler_axis = self.index if axis else self.columns
~/anaconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
260 else:
261 data = sanitize_array(data, index, dtype, copy,
--> 262 raise_cast_failure=True)
263
264 data = SingleBlockManager(data, index, fastpath=True)
~/anaconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
658 raise Exception('Data must be 1-dimensional')
659 else:
--> 660 subarr = com.asarray_tuplesafe(data, dtype=dtype)
661
662 # This is to prevent mixed-type Series getting all casted to
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in asarray_tuplesafe(values, dtype)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
~/anaconda3/lib/python3.7/site-packages/pandas/core/common.py in <listcomp>(.0)
238 # Avoid building an array of arrays:
239 # TODO: verify whether any path hits this except #18819 (invalid)
--> 240 values = [tuple(x) for x in values]
241 result = construct_1d_object_array_from_listlike(values)
242
TypeError: 'int' object is not iterable
|
TypeError
|
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df.T, internal_indices=internal_indices, **kwargs)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
|
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
return pandas_func(df, **kwargs)
return helper
|
https://github.com/modin-project/modin/issues/474
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-7f2184cfd6d7> in <module>
2 df = pd.DataFrame(columns=cols)
3 for col in cols:
----> 4 df[col] = np.arange(10)
~/Documents/modin/modin/pandas/dataframe.py in __setitem__(self, key, value)
4605 else:
4606 loc = self.columns.get_loc(key)
-> 4607 self.__delitem__(key)
4608 self.insert(loc=loc, column=key, value=value)
4609
~/Documents/modin/modin/pandas/dataframe.py in __delitem__(self, key)
4690 if key not in self:
4691 raise KeyError(key)
-> 4692 self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
4693
4694 def __finalize__(self, other, method=None, **kwargs):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in delitem(self, key)
2294 # These will change the shape of the resulting data.
2295 def delitem(self, key):
-> 2296 return self.drop(columns=[key])
2297
2298 def drop(self, index=None, columns=None):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in drop(self, index, columns)
2336 new_columns = self.columns[~self.columns.isin(columns)]
2337 new_dtypes = self.dtypes.drop(columns)
-> 2338 return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
2339
2340 # END __delitem__ and drop
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __constructor__(self, block_paritions_object, index, columns, dtypes)
41 def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
42 """By default, constructor method will invoke an init"""
---> 43 return type(self)(block_paritions_object, index, columns, dtypes)
44
45 # Index, columns and dtypes objects
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes)
32 dtypes=None,
33 ):
---> 34 assert isinstance(block_partitions_object, BaseBlockPartitions)
35 self.data = block_partitions_object
36 self.index = index
AssertionError:
|
AssertionError
|
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
|
def helper(df, internal_indices=[]):
return pandas_func(df, **kwargs)
|
https://github.com/modin-project/modin/issues/474
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-7f2184cfd6d7> in <module>
2 df = pd.DataFrame(columns=cols)
3 for col in cols:
----> 4 df[col] = np.arange(10)
~/Documents/modin/modin/pandas/dataframe.py in __setitem__(self, key, value)
4605 else:
4606 loc = self.columns.get_loc(key)
-> 4607 self.__delitem__(key)
4608 self.insert(loc=loc, column=key, value=value)
4609
~/Documents/modin/modin/pandas/dataframe.py in __delitem__(self, key)
4690 if key not in self:
4691 raise KeyError(key)
-> 4692 self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
4693
4694 def __finalize__(self, other, method=None, **kwargs):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in delitem(self, key)
2294 # These will change the shape of the resulting data.
2295 def delitem(self, key):
-> 2296 return self.drop(columns=[key])
2297
2298 def drop(self, index=None, columns=None):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in drop(self, index, columns)
2336 new_columns = self.columns[~self.columns.isin(columns)]
2337 new_dtypes = self.dtypes.drop(columns)
-> 2338 return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
2339
2340 # END __delitem__ and drop
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __constructor__(self, block_paritions_object, index, columns, dtypes)
41 def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
42 """By default, constructor method will invoke an init"""
---> 43 return type(self)(block_paritions_object, index, columns, dtypes)
44
45 # Index, columns and dtypes objects
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes)
32 dtypes=None,
33 ):
---> 34 assert isinstance(block_partitions_object, BaseBlockPartitions)
35 self.data = block_partitions_object
36 self.index = index
AssertionError:
|
AssertionError
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
include = kwargs.get("include", None)
if len(new_columns) != 0 and include is not None:
if not isinstance(include, np.dtype) and include == "all":
new_columns = self.columns
else:
new_columns = self.dtypes[
[
any(
(isinstance(inc, np.dtype) and inc == d)
or (
not isinstance(inc, np.dtype)
and inc.__subclasscheck__(getattr(np, d.__str__()))
)
for inc in include
)
for d in self.dtypes.values
]
].index
elif len(new_columns) == 0:
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
exclude = kwargs.get("exclude", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if isinstance(exclude, list):
exclude.extend(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
print(kwargs)
# Update `new_columns` to reflect the included types
new_columns = self.dtypes[~self.dtypes.isin(exclude)].index
print(new_columns)
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
include = kwargs.get("include", None)
if len(new_columns) != 0 and include is not None:
if not isinstance(include, np.dtype) and include == "all":
new_columns = self.columns
else:
new_columns = self.dtypes[
[
any(
(isinstance(inc, np.dtype) and inc == d)
or (
not isinstance(inc, np.dtype)
and inc.__subclasscheck__(getattr(np, d.__str__()))
)
for inc in include
)
for d in self.dtypes.values
]
].index
elif len(new_columns) == 0:
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
exclude = kwargs.get("exclude", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if isinstance(exclude, list):
exclude.extend(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
print(kwargs)
# Update `new_columns` to reflect the included types
new_columns = self.dtypes[~self.dtypes.isin(exclude)].index
print(new_columns)
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
|
https://github.com/modin-project/modin/issues/474
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-7f2184cfd6d7> in <module>
2 df = pd.DataFrame(columns=cols)
3 for col in cols:
----> 4 df[col] = np.arange(10)
~/Documents/modin/modin/pandas/dataframe.py in __setitem__(self, key, value)
4605 else:
4606 loc = self.columns.get_loc(key)
-> 4607 self.__delitem__(key)
4608 self.insert(loc=loc, column=key, value=value)
4609
~/Documents/modin/modin/pandas/dataframe.py in __delitem__(self, key)
4690 if key not in self:
4691 raise KeyError(key)
-> 4692 self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
4693
4694 def __finalize__(self, other, method=None, **kwargs):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in delitem(self, key)
2294 # These will change the shape of the resulting data.
2295 def delitem(self, key):
-> 2296 return self.drop(columns=[key])
2297
2298 def drop(self, index=None, columns=None):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in drop(self, index, columns)
2336 new_columns = self.columns[~self.columns.isin(columns)]
2337 new_dtypes = self.dtypes.drop(columns)
-> 2338 return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
2339
2340 # END __delitem__ and drop
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __constructor__(self, block_paritions_object, index, columns, dtypes)
41 def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
42 """By default, constructor method will invoke an init"""
---> 43 return type(self)(block_paritions_object, index, columns, dtypes)
44
45 # Index, columns and dtypes objects
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes)
32 dtypes=None,
33 ):
---> 34 assert isinstance(block_partitions_object, BaseBlockPartitions)
35 self.data = block_partitions_object
36 self.index = index
AssertionError:
|
AssertionError
|
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
|
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
|
https://github.com/modin-project/modin/issues/474
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-7f2184cfd6d7> in <module>
2 df = pd.DataFrame(columns=cols)
3 for col in cols:
----> 4 df[col] = np.arange(10)
~/Documents/modin/modin/pandas/dataframe.py in __setitem__(self, key, value)
4605 else:
4606 loc = self.columns.get_loc(key)
-> 4607 self.__delitem__(key)
4608 self.insert(loc=loc, column=key, value=value)
4609
~/Documents/modin/modin/pandas/dataframe.py in __delitem__(self, key)
4690 if key not in self:
4691 raise KeyError(key)
-> 4692 self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
4693
4694 def __finalize__(self, other, method=None, **kwargs):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in delitem(self, key)
2294 # These will change the shape of the resulting data.
2295 def delitem(self, key):
-> 2296 return self.drop(columns=[key])
2297
2298 def drop(self, index=None, columns=None):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in drop(self, index, columns)
2336 new_columns = self.columns[~self.columns.isin(columns)]
2337 new_dtypes = self.dtypes.drop(columns)
-> 2338 return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
2339
2340 # END __delitem__ and drop
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __constructor__(self, block_paritions_object, index, columns, dtypes)
41 def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
42 """By default, constructor method will invoke an init"""
---> 43 return type(self)(block_paritions_object, index, columns, dtypes)
44
45 # Index, columns and dtypes objects
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes)
32 dtypes=None,
33 ):
---> 34 assert isinstance(block_partitions_object, BaseBlockPartitions)
35 self.data = block_partitions_object
36 self.index = index
AssertionError:
|
AssertionError
|
def apply_func_to_select_indices_along_full_axis(
self, axis, func, indices, keep_remaining=False
):
"""Applies a function to a select subset of full columns/rows.
Note: This should be used when you need to apply a function that relies
on some global information for the entire column/row, but only need
to apply a function to a subset.
Important: For your func to operate directly on the indices provided,
it must use `internal_indices` as a keyword argument.
Args:
axis: The axis to apply the function over (0 - rows, 1 - columns)
func: The function to apply.
indices: The global indices to apply the func to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseBlockPartitions object, the type of object that called this.
"""
if self.partitions.size == 0:
return self.__constructor__(np.array([[]]))
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(axis, indices)
preprocessed_func = self.preprocess_func(func)
# Since we might be keeping the remaining blocks that are not modified,
# we have to also keep the block_partitions object in the correct
# direction (transpose for columns).
if not axis:
partitions_for_apply = self.column_partitions
partitions_for_remaining = self.partitions.T
else:
partitions_for_apply = self.row_partitions
partitions_for_remaining = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
if not keep_remaining:
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func,
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
preprocessed_func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in partitions_dict
]
)
else:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in range(len(partitions_for_remaining))
]
)
return self.__constructor__(result.T) if not axis else self.__constructor__(result)
|
def apply_func_to_select_indices_along_full_axis(
self, axis, func, indices, keep_remaining=False
):
"""Applies a function to a select subset of full columns/rows.
Note: This should be used when you need to apply a function that relies
on some global information for the entire column/row, but only need
to apply a function to a subset.
Important: For your func to operate directly on the indices provided,
it must use `internal_indices` as a keyword argument.
Args:
axis: The axis to apply the function over (0 - rows, 1 - columns)
func: The function to apply.
indices: The global indices to apply the func to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseBlockPartitions object, the type of object that called this.
"""
if self.partitions.size == 0:
return np.array([[]])
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(axis, indices)
preprocessed_func = self.preprocess_func(func)
# Since we might be keeping the remaining blocks that are not modified,
# we have to also keep the block_partitions object in the correct
# direction (transpose for columns).
if not axis:
partitions_for_apply = self.column_partitions
partitions_for_remaining = self.partitions.T
else:
partitions_for_apply = self.row_partitions
partitions_for_remaining = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
if not keep_remaining:
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func,
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
preprocessed_func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in partitions_dict
]
)
else:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in range(len(partitions_for_remaining))
]
)
return self.__constructor__(result.T) if not axis else self.__constructor__(result)
|
https://github.com/modin-project/modin/issues/474
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-7f2184cfd6d7> in <module>
2 df = pd.DataFrame(columns=cols)
3 for col in cols:
----> 4 df[col] = np.arange(10)
~/Documents/modin/modin/pandas/dataframe.py in __setitem__(self, key, value)
4605 else:
4606 loc = self.columns.get_loc(key)
-> 4607 self.__delitem__(key)
4608 self.insert(loc=loc, column=key, value=value)
4609
~/Documents/modin/modin/pandas/dataframe.py in __delitem__(self, key)
4690 if key not in self:
4691 raise KeyError(key)
-> 4692 self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
4693
4694 def __finalize__(self, other, method=None, **kwargs):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in delitem(self, key)
2294 # These will change the shape of the resulting data.
2295 def delitem(self, key):
-> 2296 return self.drop(columns=[key])
2297
2298 def drop(self, index=None, columns=None):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in drop(self, index, columns)
2336 new_columns = self.columns[~self.columns.isin(columns)]
2337 new_dtypes = self.dtypes.drop(columns)
-> 2338 return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
2339
2340 # END __delitem__ and drop
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __constructor__(self, block_paritions_object, index, columns, dtypes)
41 def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
42 """By default, constructor method will invoke an init"""
---> 43 return type(self)(block_paritions_object, index, columns, dtypes)
44
45 # Index, columns and dtypes objects
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes)
32 dtypes=None,
33 ):
---> 34 assert isinstance(block_partitions_object, BaseBlockPartitions)
35 self.data = block_partitions_object
36 self.index = index
AssertionError:
|
AssertionError
|
def __setitem__(self, key, value):
if not isinstance(key, str):
def setitem_without_string_columns(df):
df[key] = value
return df
return self._update_inplace(
self._default_to_pandas(setitem_without_string_columns)._query_compiler
)
if key not in self.columns:
self.insert(loc=len(self.columns), column=key, value=value)
elif len(self.index) == 0:
new_self = DataFrame({key: value}, columns=self.columns)
self._update_inplace(new_self._query_compiler)
else:
self._update_inplace(self._query_compiler.setitem(key, value))
|
def __setitem__(self, key, value):
if not isinstance(key, str):
return self._default_to_pandas(pandas.DataFrame.__setitem__, key, value)
if key not in self.columns:
self.insert(loc=len(self.columns), column=key, value=value)
else:
loc = self.columns.get_loc(key)
self.__delitem__(key)
self.insert(loc=loc, column=key, value=value)
|
https://github.com/modin-project/modin/issues/474
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-7f2184cfd6d7> in <module>
2 df = pd.DataFrame(columns=cols)
3 for col in cols:
----> 4 df[col] = np.arange(10)
~/Documents/modin/modin/pandas/dataframe.py in __setitem__(self, key, value)
4605 else:
4606 loc = self.columns.get_loc(key)
-> 4607 self.__delitem__(key)
4608 self.insert(loc=loc, column=key, value=value)
4609
~/Documents/modin/modin/pandas/dataframe.py in __delitem__(self, key)
4690 if key not in self:
4691 raise KeyError(key)
-> 4692 self._update_inplace(new_query_compiler=self._query_compiler.delitem(key))
4693
4694 def __finalize__(self, other, method=None, **kwargs):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in delitem(self, key)
2294 # These will change the shape of the resulting data.
2295 def delitem(self, key):
-> 2296 return self.drop(columns=[key])
2297
2298 def drop(self, index=None, columns=None):
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in drop(self, index, columns)
2336 new_columns = self.columns[~self.columns.isin(columns)]
2337 new_dtypes = self.dtypes.drop(columns)
-> 2338 return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
2339
2340 # END __delitem__ and drop
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __constructor__(self, block_paritions_object, index, columns, dtypes)
41 def __constructor__(self, block_paritions_object, index, columns, dtypes=None):
42 """By default, constructor method will invoke an init"""
---> 43 return type(self)(block_paritions_object, index, columns, dtypes)
44
45 # Index, columns and dtypes objects
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes)
32 dtypes=None,
33 ):
---> 34 assert isinstance(block_partitions_object, BaseBlockPartitions)
35 self.data = block_partitions_object
36 self.index = index
AssertionError:
|
AssertionError
|
def __constructor__(self):
raise NotImplementedError("Must be implemented in children classes")
|
def __constructor__(self):
return type(self)
|
https://github.com/modin-project/modin/issues/477
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-69e2ecd84881> in <module>
----> 1 modin_df.loc[:,['col1', 'col3', 'col3']].head()
~/Documents/modin/modin/pandas/dataframe.py in head(self, n)
1793 if n >= len(self.index):
1794 return self.copy()
-> 1795 return DataFrame(query_compiler=self._query_compiler.head(n))
1796
1797 def hist(
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in head(self, n)
2132 else:
2133 result = self.__constructor__(
-> 2134 self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
2135 )
2136 return result
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes, index_map_series, columns_map_series)
2816 numeric index.
2817 """
-> 2818 assert index_map_series is not None
2819 assert columns_map_series is not None
2820 assert index.equals(index_map_series.index)
AssertionError:
|
AssertionError
|
def _set_dtype(self, dtypes):
raise NotImplementedError("Must be implemented in children classes")
|
def _set_dtype(self, dtypes):
self._dtype_cache = dtypes
|
https://github.com/modin-project/modin/issues/477
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-69e2ecd84881> in <module>
----> 1 modin_df.loc[:,['col1', 'col3', 'col3']].head()
~/Documents/modin/modin/pandas/dataframe.py in head(self, n)
1793 if n >= len(self.index):
1794 return self.copy()
-> 1795 return DataFrame(query_compiler=self._query_compiler.head(n))
1796
1797 def hist(
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in head(self, n)
2132 else:
2133 result = self.__constructor__(
-> 2134 self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
2135 )
2136 return result
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes, index_map_series, columns_map_series)
2816 numeric index.
2817 """
-> 2818 assert index_map_series is not None
2819 assert columns_map_series is not None
2820 assert index.equals(index_map_series.index)
AssertionError:
|
AssertionError
|
def _set_data(self, new_data):
"""Note this setter will be called by the
`super(BaseQueryCompiler).__init__` function
"""
raise NotImplementedError("Must be implemented in children classes")
|
def _set_data(self, new_data):
"""Note this setter will be called by the
`super(PandasDataManagerView).__init__` function
"""
self.parent_data = new_data
|
https://github.com/modin-project/modin/issues/477
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-69e2ecd84881> in <module>
----> 1 modin_df.loc[:,['col1', 'col3', 'col3']].head()
~/Documents/modin/modin/pandas/dataframe.py in head(self, n)
1793 if n >= len(self.index):
1794 return self.copy()
-> 1795 return DataFrame(query_compiler=self._query_compiler.head(n))
1796
1797 def hist(
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in head(self, n)
2132 else:
2133 result = self.__constructor__(
-> 2134 self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
2135 )
2136 return result
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes, index_map_series, columns_map_series)
2816 numeric index.
2817 """
-> 2818 assert index_map_series is not None
2819 assert columns_map_series is not None
2820 assert index.equals(index_map_series.index)
AssertionError:
|
AssertionError
|
def _get_dtype(self):
calculate_dtype = False
if self._dtype_cache is None:
calculate_dtype = True
else:
if len(self.columns) != len(self._dtype_cache):
if all(col in self._dtype_cache.index for col in self.columns):
self._dtype_cache = pandas.Series(
{col: self._dtype_cache[col] for col in self.columns}
)
else:
calculate_dtype = True
elif not self._dtype_cache.equals(self.columns):
self._dtype_cache.index = self.columns
if calculate_dtype:
map_func = self._prepare_method(lambda df: df.dtypes)
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
self._dtype_cache = self._full_reduce(0, map_func, dtype_builder)
self._dtype_cache.index = self.columns
return self._dtype_cache
|
def _get_dtype(self):
if self._dtype_cache is None:
map_func = self._prepare_method(lambda df: df.dtypes)
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
self._dtype_cache = self._full_reduce(0, map_func, dtype_builder)
self._dtype_cache.index = self.columns
elif not self._dtype_cache.index.equals(self.columns):
self._dtype_cache.index = self.columns
return self._dtype_cache
|
https://github.com/modin-project/modin/issues/477
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-69e2ecd84881> in <module>
----> 1 modin_df.loc[:,['col1', 'col3', 'col3']].head()
~/Documents/modin/modin/pandas/dataframe.py in head(self, n)
1793 if n >= len(self.index):
1794 return self.copy()
-> 1795 return DataFrame(query_compiler=self._query_compiler.head(n))
1796
1797 def hist(
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in head(self, n)
2132 else:
2133 result = self.__constructor__(
-> 2134 self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
2135 )
2136 return result
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes, index_map_series, columns_map_series)
2816 numeric index.
2817 """
-> 2818 assert index_map_series is not None
2819 assert columns_map_series is not None
2820 assert index.equals(index_map_series.index)
AssertionError:
|
AssertionError
|
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
data = [
pandas.Series(dtype=self.dtypes[col_name], name=col_name)
for col_name in self.columns
]
df = pandas.concat(data, axis=1)
else:
df = pandas.DataFrame(index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
|
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the DataManager.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
dtype_dict = {
col_name: pandas.Series(dtype=self.dtypes[col_name])
for col_name in self.columns
}
df = pandas.DataFrame(dtype_dict, self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
|
https://github.com/modin-project/modin/issues/477
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-5-69e2ecd84881> in <module>
----> 1 modin_df.loc[:,['col1', 'col3', 'col3']].head()
~/Documents/modin/modin/pandas/dataframe.py in head(self, n)
1793 if n >= len(self.index):
1794 return self.copy()
-> 1795 return DataFrame(query_compiler=self._query_compiler.head(n))
1796
1797 def hist(
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in head(self, n)
2132 else:
2133 result = self.__constructor__(
-> 2134 self.data.take(0, n), self.index[:n], self.columns, self._dtype_cache
2135 )
2136 return result
~/Documents/modin/modin/data_management/query_compiler/pandas_query_compiler.py in __init__(self, block_partitions_object, index, columns, dtypes, index_map_series, columns_map_series)
2816 numeric index.
2817 """
-> 2818 assert index_map_series is not None
2819 assert columns_map_series is not None
2820 assert index.equals(index_map_series.index)
AssertionError:
|
AssertionError
|
def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}):
"""Constructs a DataFrame from a CSV file.
Args:
filepath (str): path to the CSV file.
npartitions (int): number of partitions for the DataFrame.
kwargs (dict): args excluding filepath provided to read_csv.
Returns:
DataFrame or Series constructed from CSV file.
"""
empty_pd_df = pandas.read_csv(filepath, **dict(kwargs, nrows=0, skipfooter=0))
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=column_names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
)
with open(filepath, "rb") as f:
# Get the BOM if necessary
prefix = b""
if kwargs.get("encoding", None) is not None:
prefix = f.readline()
partition_kwargs["skiprows"] = 1
f.seek(0, os.SEEK_SET) # Return to beginning of file
prefix_id = ray.put(prefix)
partition_kwargs_id = ray.put(partition_kwargs)
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
kwargs["skiprows"] = skiprows
cls._skip_header(f, kwargs)
# Launch tasks to read partitions
partition_ids = []
index_ids = []
total_bytes = os.path.getsize(filepath)
# Max number of partitions available
num_parts = RayBlockPartitions._compute_num_partitions()
# This is the number of splits for the columns
num_splits = min(len(column_names), num_parts)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_parts)
while f.tell() < total_bytes:
start = f.tell()
f.seek(chunk_size, os.SEEK_CUR)
f.readline() # Read a whole number of lines
partition_id = _read_csv_with_offset_pandas_on_ray._remote(
args=(
filepath,
num_splits,
start,
f.tell(),
partition_kwargs_id,
prefix_id,
),
num_return_vals=num_splits + 1,
)
partition_ids.append(
[PandasOnRayRemotePartition(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
index_col = kwargs.get("index_col", None)
if index_col is None:
new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
else:
new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids)
new_index = ray.get(new_index_ids)
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
new_query_compiler = PandasQueryCompiler(
RayBlockPartitions(np.array(partition_ids)), new_index, column_names
)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
return new_query_compiler
|
def _read_csv_from_file_pandas_on_ray(cls, filepath, kwargs={}):
"""Constructs a DataFrame from a CSV file.
Args:
filepath (str): path to the CSV file.
npartitions (int): number of partitions for the DataFrame.
kwargs (dict): args excluding filepath provided to read_csv.
Returns:
DataFrame or Series constructed from CSV file.
"""
empty_pd_df = pandas.read_csv(filepath, **dict(kwargs, nrows=0, skipfooter=0))
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
partition_kwargs = dict(
kwargs, header=None, names=column_names, skipfooter=0, skiprows=None
)
with open(filepath, "rb") as f:
# Get the BOM if necessary
prefix = b""
if kwargs.get("encoding", None) is not None:
prefix = f.readline()
partition_kwargs["skiprows"] = 1
f.seek(0, os.SEEK_SET) # Return to beginning of file
prefix_id = ray.put(prefix)
partition_kwargs_id = ray.put(partition_kwargs)
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
kwargs["skiprows"] = skiprows
cls._skip_header(f, kwargs)
# Launch tasks to read partitions
partition_ids = []
index_ids = []
total_bytes = os.path.getsize(filepath)
# Max number of partitions available
num_parts = RayBlockPartitions._compute_num_partitions()
# This is the number of splits for the columns
num_splits = min(len(column_names), num_parts)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_parts)
while f.tell() < total_bytes:
start = f.tell()
f.seek(chunk_size, os.SEEK_CUR)
f.readline() # Read a whole number of lines
partition_id = _read_csv_with_offset_pandas_on_ray._remote(
args=(
filepath,
num_splits,
start,
f.tell(),
partition_kwargs_id,
prefix_id,
),
num_return_vals=num_splits + 1,
)
partition_ids.append(
[PandasOnRayRemotePartition(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
index_col = kwargs.get("index_col", None)
if index_col is None:
new_index = pandas.RangeIndex(sum(ray.get(index_ids)))
else:
new_index_ids = get_index.remote([empty_pd_df.index.name], *index_ids)
new_index = ray.get(new_index_ids)
new_query_compiler = PandasQueryCompiler(
RayBlockPartitions(np.array(partition_ids)), new_index, column_names
)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
return new_query_compiler
|
https://github.com/modin-project/modin/issues/470
|
import modin.pandas as pd
with open('test.tsv', 'w') as f:
f.write("Date\tTime\n"
"7/12/2018\t16:29:42.27\n"
"7/12/2018\t16:30:11.83")
pd.read_csv('test.tsv', sep='\t',
parse_dates={'timestamp':['Date', 'Time']})
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
/usr/local/lib/python3.5/dist-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
/usr/local/lib/python3.5/dist-packages/IPython/lib/pretty.py in pretty(self, obj)
398 if cls is not object \
399 and callable(cls.__dict__.get('__repr__')):
--> 400 return _repr_pprint(obj, self, cycle)
401
402 return _default_pprint(obj, self, cycle)
/usr/local/lib/python3.5/dist-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
693 """A pprint that just redirects to the normal repr function."""
694 # Find newlines and replace them with p.break_()
--> 695 output = repr(obj)
696 for idx,output_line in enumerate(output.splitlines()):
697 if idx:
~/.local/lib/python3.5/site-packages/modin/pandas/dataframe.py in __repr__(self)
124 num_cols = 30
125
--> 126 result = repr(self._build_repr_df(num_rows, num_cols))
127 if len(self.index) > num_rows or len(self.columns) > num_cols:
128 # The split here is so that we don't repr pandas row lengths.
~/.local/lib/python3.5/site-packages/modin/pandas/dataframe.py in _build_repr_df(self, num_rows, num_cols)
95
96 if len(self.columns) <= num_cols:
---> 97 head_front = head.to_pandas()
98 # Creating these empty to make the concat logic simpler
99 head_back = pandas.DataFrame()
~/.local/lib/python3.5/site-packages/modin/data_management/query_compiler/pandas_query_compiler.py in to_pandas(self)
2197 else:
2198 ErrorMessage.catch_bugs_and_request_email(
-> 2199 len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
2200 )
2201 df.index = self.index
~/.local/lib/python3.5/site-packages/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
36 if failure_condition:
37 raise Exception(
---> 38 "Internal Error. "
39 "Please email bug_reports@modin.org with the traceback and command that"
40 " caused this error."
Exception: Internal Error. Please email bug_reports@modin.org with the traceback and command that caused this error.
|
Exception
|
def drop_duplicates(self, subset=None, keep="first", inplace=False):
"""Return DataFrame with duplicate rows removed, optionally only considering certain columns
Args:
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns:
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
duplicates = self.duplicated(subset=subset, keep=keep)
(indices,) = duplicates.values.nonzero()
return self.drop(index=self.index[indices], inplace=inplace)
|
def drop_duplicates(self, subset=None, keep="first", inplace=False):
"""Return DataFrame with duplicate rows removed, optionally only considering certain columns
Args:
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns:
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, "inplace")
duplicates = self.duplicated(subset=subset, keep=keep)
(indices,) = duplicates.nonzero()
return self.drop(indices, inplace=inplace)
|
https://github.com/modin-project/modin/issues/464
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-59-b4c2776eba26> in <module>()
1 broken = pd.read_json('/Users/beckermi/Documents/modin_broken.json', orient='split')
----> 2 broken[['time', 'id']].sort_values(['id', 'time']).drop_duplicates(['id'])
~/anaconda3/envs/hsm/lib/python3.6/site-packages/modin/pandas/dataframe.py in drop_duplicates(self, subset, keep, inplace)
1279 duplicates = self.duplicated(subset=subset, keep=keep)
1280 indices, = duplicates.nonzero()
-> 1281 return self.drop(indices, inplace=inplace)
1282
1283 def duplicated(self, subset=None, keep="first"):
~/anaconda3/envs/hsm/lib/python3.6/site-packages/modin/pandas/dataframe.py in drop(self, labels, axis, index, columns, level, inplace, errors)
1225 if len(non_existant):
1226 raise ValueError(
-> 1227 "labels {} not contained in axis".format(non_existant)
1228 )
1229 else:
ValueError: labels [2] not contained in axis
|
ValueError
|
def __getattribute__(self, item):
default_behaviors = [
"__init__",
"series",
"parent_df",
"_loc",
"__arithmetic_op__",
"__comparisons__",
"__class__",
]
if item not in default_behaviors:
method = self.series.__getattribute__(item)
# Certain operations like `at`, `loc`, `iloc`, etc. are callable because in
# pandas they are equivalent to classes. They are verified here because they
# cannot be overridden with the functions below. This generally solves the
# problem where the instance property is callable, but the class property is
# not.
# The isclass check is to ensure that we return the correct type. Some of
# the objects that are called result in classes being returned, and we don't
# want to override with our own function.
is_callable = (
callable(method)
and callable(getattr(type(self.series), item))
and not inspect.isclass(getattr(type(self.series), item))
)
try:
has_inplace_param = is_callable and "inplace" in str(
inspect.signature(method)
)
# This will occur on Python2
except AttributeError:
has_inplace_param = is_callable and "inplace" in str(
inspect.getargspec(method)
)
if is_callable and has_inplace_param and self.parent_df is not None:
def inplace_handler(*args, **kwargs):
"""Replaces the default behavior of methods with inplace kwarg.
Note: This method will modify the DataFrame this Series is attached
to when `inplace` is True. Instead of rewriting or overriding
every method that uses `inplace`, we use this handler.
This handler will first check that the keyword argument passed
for `inplace` is True, if not then it will just return the
result of the operation requested.
If `inplace` is True, do the operation, keeping track of the
previous length. This is because operations like `dropna` still
propagate back to the DataFrame that holds the Series.
If the length did not change, we propagate the inplace changes
of the operation back to the original DataFrame with
`__setitem__`.
If the length changed, we just need to do a `reindex` on the
parent DataFrame. This will propagate the inplace operation
(e.g. `dropna`) back to the parent DataFrame.
See notes in SeriesView class about when it is okay to return a
pandas Series vs a SeriesView.
Returns:
If `inplace` is True: None, else: A new Series.
"""
if kwargs.get("inplace", False):
prev_len = len(self.series)
self.series.__getattribute__(item)(*args, **kwargs)
if prev_len == len(self.series):
self.parent_df.loc[self._loc] = self.series
else:
self.parent_df.reindex(index=self.series.index, copy=False)
return None
else:
return self.series.__getattribute__(item)(*args, **kwargs)
# We replace the method with `inplace_handler` for inplace operations
method = inplace_handler
elif is_callable:
def other_handler(*args, **kwargs):
"""Replaces the method's args and kwargs with the Series object.
Note: This method is needed because sometimes operations like
`df['col0'].equals(df['col1'])` do not return the correct value.
This mostly has occurred in Python2, but overriding of the
method will make the behavior more deterministic for all calls.
Returns the result of `__getattribute__` from the Series this wraps.
"""
args = tuple(
arg if not isinstance(arg, SeriesView) else arg.series
for arg in args
)
kwargs = {
kw: arg if not isinstance(arg, SeriesView) else arg.series
for kw, arg in kwargs.items()
}
return self.series.__getattribute__(item)(*args, **kwargs)
method = other_handler
return method
# We need to do this hack for equality checking.
elif item == "__class__":
return self.series.__class__
else:
return object.__getattribute__(self, item)
|
def __getattribute__(self, item):
default_behaviors = [
"__init__",
"series",
"parent_df",
"_loc",
"__arithmetic_op__",
"__comparisons__",
"__class__",
]
if item not in default_behaviors:
method = self.series.__getattribute__(item)
# Certain operations like `at`, `loc`, `iloc`, etc. are callable because in
# pandas they are equivalent to classes. They are verified here because they
# cannot be overridden with the functions below. This generally solves the
# problem where the instance property is callable, but the class property is
# not.
is_callable = callable(method) and callable(getattr(type(self.series), item))
try:
has_inplace_param = is_callable and "inplace" in str(
inspect.signature(method)
)
# This will occur on Python2
except AttributeError:
has_inplace_param = is_callable and "inplace" in str(
inspect.getargspec(method)
)
if is_callable and has_inplace_param and self.parent_df is not None:
def inplace_handler(*args, **kwargs):
"""Replaces the default behavior of methods with inplace kwarg.
Note: This method will modify the DataFrame this Series is attached
to when `inplace` is True. Instead of rewriting or overriding
every method that uses `inplace`, we use this handler.
This handler will first check that the keyword argument passed
for `inplace` is True, if not then it will just return the
result of the operation requested.
If `inplace` is True, do the operation, keeping track of the
previous length. This is because operations like `dropna` still
propagate back to the DataFrame that holds the Series.
If the length did not change, we propagate the inplace changes
of the operation back to the original DataFrame with
`__setitem__`.
If the length changed, we just need to do a `reindex` on the
parent DataFrame. This will propagate the inplace operation
(e.g. `dropna`) back to the parent DataFrame.
See notes in SeriesView class about when it is okay to return a
pandas Series vs a SeriesView.
Returns:
If `inplace` is True: None, else: A new Series.
"""
if kwargs.get("inplace", False):
prev_len = len(self.series)
self.series.__getattribute__(item)(*args, **kwargs)
if prev_len == len(self.series):
self.parent_df.loc[self._loc] = self.series
else:
self.parent_df.reindex(index=self.series.index, copy=False)
return None
else:
return self.series.__getattribute__(item)(*args, **kwargs)
# We replace the method with `inplace_handler` for inplace operations
method = inplace_handler
elif is_callable:
def other_handler(*args, **kwargs):
"""Replaces the method's args and kwargs with the Series object.
Note: This method is needed because sometimes operations like
`df['col0'].equals(df['col1'])` do not return the correct value.
This mostly has occurred in Python2, but overriding of the
method will make the behavior more deterministic for all calls.
Returns the result of `__getattribute__` from the Series this wraps.
"""
args = tuple(
arg if not isinstance(arg, SeriesView) else arg.series
for arg in args
)
kwargs = {
kw: arg if not isinstance(arg, SeriesView) else arg.series
for kw, arg in kwargs.items()
}
return self.series.__getattribute__(item)(*args, **kwargs)
method = other_handler
return method
# We need to do this hack for equality checking.
elif item == "__class__":
return self.series.__class__
else:
return object.__getattribute__(self, item)
|
https://github.com/modin-project/modin/issues/374
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/Downloads/kaggle7/kaggle7.py in <module>
126 # In[16]:
127
--> 128 app_train['DAYS_EMPLOYED'].plot.hist(title = 'Days Employment Histogram');
129 plt.xlabel('Days Employment');
130
AttributeError: 'function' object has no attribute 'hist'
|
AttributeError
|
def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False):
"""Applies a function to select indices.
Note: Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
Args:
axis: The axis to apply the func over.
func: The function to apply to these indices.
indices: The indices to apply the function to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseBlockPartitions object, the type of object that called this.
"""
if self.partitions.size == 0:
return np.array([[]])
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(
axis, indices, ordered=not keep_remaining
)
if not axis:
partitions_for_apply = self.partitions.T
else:
partitions_for_apply = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
def local_to_global_idx(partition_id, local_idx):
if partition_id == 0:
return local_idx
if axis == 0:
cumulative_axis = np.cumsum(self.block_widths)
else:
cumulative_axis = np.cumsum(self.block_lengths)
return cumulative_axis[partition_id - 1] + local_idx
if not keep_remaining:
result = np.array(
[
self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[o_idx],
func_dict={
i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)]
for i_idx in list_to_apply
if i_idx >= 0
},
)
for o_idx, list_to_apply in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_apply[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[local_to_global_idx(i, idx)]
for idx in partitions_dict[i]
if idx >= 0
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# We are passing internal indices in here. In order for func to
# actually be able to use this information, it must be able to take in
# the internal indices. This might mean an iloc in the case of Pandas
# or some other way to index into the internal representation.
result = np.array(
[
self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[idx],
internal_indices=list_to_apply,
)
for idx, list_to_apply in partitions_dict
]
)
else:
# The difference here is that we modify a subset and return the
# remaining (non-updated) blocks in their original position.
result = np.array(
[
partitions_for_apply[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
internal_indices=partitions_dict[i],
)
for i in range(len(partitions_for_apply))
]
)
return self.__constructor__(result.T) if not axis else self.__constructor__(result)
|
def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False):
"""Applies a function to select indices.
Note: Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
Args:
axis: The axis to apply the func over.
func: The function to apply to these indices.
indices: The indices to apply the function to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseBlockPartitions object, the type of object that called this.
"""
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(
axis, indices, ordered=not keep_remaining
)
if not axis:
partitions_for_apply = self.partitions.T
else:
partitions_for_apply = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
def local_to_global_idx(partition_id, local_idx):
if partition_id == 0:
return local_idx
if axis == 0:
cumulative_axis = np.cumsum(self.block_widths)
else:
cumulative_axis = np.cumsum(self.block_lengths)
return cumulative_axis[partition_id - 1] + local_idx
if not keep_remaining:
result = np.array(
[
self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[o_idx],
func_dict={
i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)]
for i_idx in list_to_apply
if i_idx >= 0
},
)
for o_idx, list_to_apply in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_apply[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[local_to_global_idx(i, idx)]
for idx in partitions_dict[i]
if idx >= 0
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# We are passing internal indices in here. In order for func to
# actually be able to use this information, it must be able to take in
# the internal indices. This might mean an iloc in the case of Pandas
# or some other way to index into the internal representation.
result = np.array(
[
self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[idx],
internal_indices=list_to_apply,
)
for idx, list_to_apply in partitions_dict
]
)
else:
# The difference here is that we modify a subset and return the
# remaining (non-updated) blocks in their original position.
result = np.array(
[
partitions_for_apply[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
internal_indices=partitions_dict[i],
)
for i in range(len(partitions_for_apply))
]
)
return self.__constructor__(result.T) if not axis else self.__constructor__(result)
|
https://github.com/modin-project/modin/issues/368
|
Exception Traceback (most recent call last)
~/Downloads/kaggle5/kaggle5.py in <module>
161 for j in range(0, 3):
162 try:
--> 163 guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
164 except:
165 print(dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)])
~/software_builds/modin/modin/pandas/dataframe.py in __getitem__(self, key)
4504 try:
4505 if key in self.columns and not is_mi_columns:
-> 4506 return self._getitem_column(key)
4507 except (KeyError, ValueError, TypeError):
4508 pass
~/software_builds/modin/modin/pandas/dataframe.py in _getitem_column(self, key)
4524 def _getitem_column(self, key):
4525 return SeriesView(
-> 4526 self._query_compiler.getitem_single_key(key), self, (slice(None), key)
4527 )
4528
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in getitem_single_key(self, key)
2165 A new PandasDataManager.
2166 """
-> 2167 new_data = self.getitem_column_array([key])
2168 if len(self.columns.get_indexer_for([key])) > 1:
2169 return new_data
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in getitem_column_array(self, key)
2192
2193 result = self.data.apply_func_to_select_indices(
-> 2194 0, getitem, numeric_indices, keep_remaining=False
2195 )
2196 # We can't just set the columns to key here because there may be
~/software_builds/modin/modin/engines/base/block_partitions.py in apply_func_to_select_indices(self, axis, func, indices, keep_remaining)
707 indices = [indices]
708 partitions_dict = self._get_dict_of_block_index(
--> 709 axis, indices, ordered=not keep_remaining
710 )
711 if not axis:
~/software_builds/modin/modin/engines/base/block_partitions.py in _get_dict_of_block_index(self, axis, indices, ordered)
631 # travel to each partition once.
632 all_partitions_and_idx = [
--> 633 self._get_blocks_containing_index(axis, i) for i in indices
634 ]
635
~/software_builds/modin/modin/engines/base/block_partitions.py in <listcomp>(.0)
631 # travel to each partition once.
632 all_partitions_and_idx = [
--> 633 self._get_blocks_containing_index(axis, i) for i in indices
634 ]
635
~/software_builds/modin/modin/engines/base/block_partitions.py in _get_blocks_containing_index(self, axis, index)
588 """
589 if not axis:
--> 590 ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths))
591 cumulative_column_widths = np.array(self.block_widths).cumsum()
592 block_idx = int(np.digitize(index, cumulative_column_widths))
~/software_builds/modin/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
25 if failure_condition:
26 raise Exception(
---> 27 "Internal Error. "
28 "Please email bugs@modin.org with the traceback and command that "
29 "caused this error."
Exception: Internal Error. Please email bugs@modin.org with the traceback and command that caused this error.
|
Exception
|
def apply_func_to_select_indices_along_full_axis(
self, axis, func, indices, keep_remaining=False
):
"""Applies a function to a select subset of full columns/rows.
Note: This should be used when you need to apply a function that relies
on some global information for the entire column/row, but only need
to apply a function to a subset.
Important: For your func to operate directly on the indices provided,
it must use `internal_indices` as a keyword argument.
Args:
axis: The axis to apply the function over (0 - rows, 1 - columns)
func: The function to apply.
indices: The global indices to apply the func to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseBlockPartitions object, the type of object that called this.
"""
if self.partitions.size == 0:
return np.array([[]])
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(axis, indices)
preprocessed_func = self.preprocess_func(func)
# Since we might be keeping the remaining blocks that are not modified,
# we have to also keep the block_partitions object in the correct
# direction (transpose for columns).
if not axis:
partitions_for_apply = self.column_partitions
partitions_for_remaining = self.partitions.T
else:
partitions_for_apply = self.row_partitions
partitions_for_remaining = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
if not keep_remaining:
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func,
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
preprocessed_func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in partitions_dict
]
)
else:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in range(len(partitions_for_remaining))
]
)
return self.__constructor__(result.T) if not axis else self.__constructor__(result)
|
def apply_func_to_select_indices_along_full_axis(
self, axis, func, indices, keep_remaining=False
):
"""Applies a function to a select subset of full columns/rows.
Note: This should be used when you need to apply a function that relies
on some global information for the entire column/row, but only need
to apply a function to a subset.
Important: For your func to operate directly on the indices provided,
it must use `internal_indices` as a keyword argument.
Args:
axis: The axis to apply the function over (0 - rows, 1 - columns)
func: The function to apply.
indices: The global indices to apply the func to.
keep_remaining: Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns:
A new BaseBlockPartitions object, the type of object that called this.
"""
if isinstance(indices, dict):
dict_indices = indices
indices = list(indices.keys())
else:
dict_indices = None
if not isinstance(indices, list):
indices = [indices]
partitions_dict = self._get_dict_of_block_index(axis, indices)
preprocessed_func = self.preprocess_func(func)
# Since we might be keeping the remaining blocks that are not modified,
# we have to also keep the block_partitions object in the correct
# direction (transpose for columns).
if not axis:
partitions_for_apply = self.column_partitions
partitions_for_remaining = self.partitions.T
else:
partitions_for_apply = self.row_partitions
partitions_for_remaining = self.partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_indices is not None:
if not keep_remaining:
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func,
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in partitions_dict
]
)
else:
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else self._apply_func_to_list_of_partitions(
preprocessed_func,
partitions_for_apply[i],
func_dict={
idx: dict_indices[idx] for idx in partitions_dict[i]
},
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in partitions_dict
]
)
else:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
partitions_for_remaining[i]
if i not in partitions_dict
else partitions_for_apply[i].apply(
preprocessed_func, internal_indices=partitions_dict[i]
)
for i in range(len(partitions_for_remaining))
]
)
return self.__constructor__(result.T) if not axis else self.__constructor__(result)
|
https://github.com/modin-project/modin/issues/368
|
Exception Traceback (most recent call last)
~/Downloads/kaggle5/kaggle5.py in <module>
161 for j in range(0, 3):
162 try:
--> 163 guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)]['Age'].dropna()
164 except:
165 print(dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j+1)])
~/software_builds/modin/modin/pandas/dataframe.py in __getitem__(self, key)
4504 try:
4505 if key in self.columns and not is_mi_columns:
-> 4506 return self._getitem_column(key)
4507 except (KeyError, ValueError, TypeError):
4508 pass
~/software_builds/modin/modin/pandas/dataframe.py in _getitem_column(self, key)
4524 def _getitem_column(self, key):
4525 return SeriesView(
-> 4526 self._query_compiler.getitem_single_key(key), self, (slice(None), key)
4527 )
4528
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in getitem_single_key(self, key)
2165 A new PandasDataManager.
2166 """
-> 2167 new_data = self.getitem_column_array([key])
2168 if len(self.columns.get_indexer_for([key])) > 1:
2169 return new_data
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in getitem_column_array(self, key)
2192
2193 result = self.data.apply_func_to_select_indices(
-> 2194 0, getitem, numeric_indices, keep_remaining=False
2195 )
2196 # We can't just set the columns to key here because there may be
~/software_builds/modin/modin/engines/base/block_partitions.py in apply_func_to_select_indices(self, axis, func, indices, keep_remaining)
707 indices = [indices]
708 partitions_dict = self._get_dict_of_block_index(
--> 709 axis, indices, ordered=not keep_remaining
710 )
711 if not axis:
~/software_builds/modin/modin/engines/base/block_partitions.py in _get_dict_of_block_index(self, axis, indices, ordered)
631 # travel to each partition once.
632 all_partitions_and_idx = [
--> 633 self._get_blocks_containing_index(axis, i) for i in indices
634 ]
635
~/software_builds/modin/modin/engines/base/block_partitions.py in <listcomp>(.0)
631 # travel to each partition once.
632 all_partitions_and_idx = [
--> 633 self._get_blocks_containing_index(axis, i) for i in indices
634 ]
635
~/software_builds/modin/modin/engines/base/block_partitions.py in _get_blocks_containing_index(self, axis, index)
588 """
589 if not axis:
--> 590 ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths))
591 cumulative_column_widths = np.array(self.block_widths).cumsum()
592 block_idx = int(np.digitize(index, cumulative_column_widths))
~/software_builds/modin/modin/error_message.py in catch_bugs_and_request_email(cls, failure_condition)
25 if failure_condition:
26 raise Exception(
---> 27 "Internal Error. "
28 "Please email bugs@modin.org with the traceback and command that "
29 "caused this error."
Exception: Internal Error. Please email bugs@modin.org with the traceback and command that caused this error.
|
Exception
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
include = kwargs.get("include", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
if include is None:
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if is_list_like(exclude):
exclude.append(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
else:
new_dtypes = pandas.Series([np.object for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
include = kwargs.get("include", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
if include is None:
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if is_list_like(exclude):
exclude.append(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
return pandas.DataFrame.describe(df, **kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
else:
new_dtypes = pandas.Series([np.object for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
https://github.com/modin-project/modin/issues/364
|
---------------------------------------------------------------------------
RayTaskError Traceback (most recent call last)
~/Downloads/kaggle5/kaggle5.py in <module>
51
52
---> 53 train_df.describe(include=['O'])
54
55 # In[9]:
~/software_builds/modin/modin/pandas/dataframe.py in describe(self, percentiles, include, exclude)
1093 return DataFrame(
1094 query_compiler=self._query_compiler.describe(
-> 1095 percentiles=percentiles, include=include, exclude=exclude
1096 )
1097 )
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in describe(self, **kwargs)
1601 func, 0, new_columns, False
1602 )
-> 1603 new_index = self.compute_index(0, new_data, False)
1604 if numeric:
1605 new_dtypes = pandas.Series(
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in compute_index(self, axis, data_object, compute_diff)
136 axis=axis,
137 index_func=lambda df: pandas_index_extraction(df, axis),
--> 138 old_blocks=old_blocks,
139 )
140 return index_obj[new_indices] if compute_diff else new_indices
~/software_builds/modin/modin/engines/base/block_partitions.py in get_indices(self, axis, index_func, old_blocks)
514 new_indices = (
515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
--> 516 if len(self._partitions_cache.T)
517 else []
518 )
~/software_builds/modin/modin/engines/base/block_partitions.py in <listcomp>(.0)
513 # DO NOT CHANGE TO self.partitions under any circumstance.
514 new_indices = (
--> 515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
516 if len(self._partitions_cache.T)
517 else []
~/software_builds/modin/modin/engines/ray/pandas_on_ray/remote_partition.py in get(self)
26 return self.apply(lambda x: x).get()
27
---> 28 return ray.get(self.oid)
29
30 def apply(self, func, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ray/worker.py in get(object_ids, worker)
2384 # here.
2385 last_task_error_raise_time = time.time()
-> 2386 raise value
2387 return value
2388
RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.remote_partition.deploy_ray_func() (pid=17711, host=iMac.local)
ray.worker.RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.axis_partition.deploy_ray_axis_func() (pid=17712, host=iMac.local)
File "/Users/DevinPetersohn/software_builds/modin/modin/engines/ray/pandas_on_ray/axis_partition.py", line 127, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 163, in helper
return pandas_func(df, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 1596, in describe_builder
return pandas.DataFrame.describe(df, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/generic.py", line 8585, in describe
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/reshape/concat.py", line 225, in concat
copy=copy, sort=sort)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/reshape/concat.py", line 259, in __init__
raise ValueError('No objects to concatenate')
ValueError: No objects to concatenate
|
RayTaskError
|
def describe_builder(df, **kwargs):
try:
return pandas.DataFrame.describe(df, **kwargs)
except ValueError:
return pandas.DataFrame(index=df.index)
|
def describe_builder(df, **kwargs):
return pandas.DataFrame.describe(df, **kwargs)
|
https://github.com/modin-project/modin/issues/364
|
---------------------------------------------------------------------------
RayTaskError Traceback (most recent call last)
~/Downloads/kaggle5/kaggle5.py in <module>
51
52
---> 53 train_df.describe(include=['O'])
54
55 # In[9]:
~/software_builds/modin/modin/pandas/dataframe.py in describe(self, percentiles, include, exclude)
1093 return DataFrame(
1094 query_compiler=self._query_compiler.describe(
-> 1095 percentiles=percentiles, include=include, exclude=exclude
1096 )
1097 )
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in describe(self, **kwargs)
1601 func, 0, new_columns, False
1602 )
-> 1603 new_index = self.compute_index(0, new_data, False)
1604 if numeric:
1605 new_dtypes = pandas.Series(
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in compute_index(self, axis, data_object, compute_diff)
136 axis=axis,
137 index_func=lambda df: pandas_index_extraction(df, axis),
--> 138 old_blocks=old_blocks,
139 )
140 return index_obj[new_indices] if compute_diff else new_indices
~/software_builds/modin/modin/engines/base/block_partitions.py in get_indices(self, axis, index_func, old_blocks)
514 new_indices = (
515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
--> 516 if len(self._partitions_cache.T)
517 else []
518 )
~/software_builds/modin/modin/engines/base/block_partitions.py in <listcomp>(.0)
513 # DO NOT CHANGE TO self.partitions under any circumstance.
514 new_indices = (
--> 515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
516 if len(self._partitions_cache.T)
517 else []
~/software_builds/modin/modin/engines/ray/pandas_on_ray/remote_partition.py in get(self)
26 return self.apply(lambda x: x).get()
27
---> 28 return ray.get(self.oid)
29
30 def apply(self, func, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ray/worker.py in get(object_ids, worker)
2384 # here.
2385 last_task_error_raise_time = time.time()
-> 2386 raise value
2387 return value
2388
RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.remote_partition.deploy_ray_func() (pid=17711, host=iMac.local)
ray.worker.RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.axis_partition.deploy_ray_axis_func() (pid=17712, host=iMac.local)
File "/Users/DevinPetersohn/software_builds/modin/modin/engines/ray/pandas_on_ray/axis_partition.py", line 127, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 163, in helper
return pandas_func(df, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 1596, in describe_builder
return pandas.DataFrame.describe(df, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/generic.py", line 8585, in describe
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/reshape/concat.py", line 225, in concat
copy=copy, sort=sort)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/reshape/concat.py", line 259, in __init__
raise ValueError('No objects to concatenate')
ValueError: No objects to concatenate
|
RayTaskError
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
include = kwargs.get("include", None)
# This is done to check against the default dtypes with 'in'.
# We don't change `include` in kwargs, so we can just use this for the
# check.
if include is None:
include = []
default_excludes = [np.timedelta64, np.datetime64, np.object, np.bool]
add_to_excludes = [e for e in default_excludes if e not in include]
if is_list_like(exclude):
exclude.append(add_to_excludes)
else:
exclude = add_to_excludes
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
return pandas.DataFrame.describe(df, **kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
else:
new_dtypes = pandas.Series([np.object for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Only describe numeric if there are numeric columns
# Otherwise, describe all
new_columns = self.numeric_columns(include_bool=False)
if len(new_columns) != 0:
numeric = True
exclude = kwargs.get("exclude", None)
if is_list_like(exclude):
exclude.append([np.timedelta64, np.datetime64, np.object, np.bool])
else:
exclude = [exclude, np.timedelta64, np.datetime64, np.object, np.bool]
kwargs["exclude"] = exclude
else:
numeric = False
# If only timedelta and datetime objects, only do the timedelta
# columns
if all(
(
dtype
for dtype in self.dtypes
if dtype == np.datetime64 or dtype == np.timedelta64
)
):
new_columns = [
self.columns[i]
for i in range(len(self.columns))
if self.dtypes[i] != np.dtype("datetime64[ns]")
]
else:
# Describe all columns
new_columns = self.columns
def describe_builder(df, **kwargs):
return pandas.DataFrame.describe(df, **kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self.full_axis_reduce_along_select_indices(func, 0, new_columns, False)
new_index = self.compute_index(0, new_data, False)
if numeric:
new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns)
else:
new_dtypes = pandas.Series([np.object for _ in new_columns], index=new_columns)
return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
|
https://github.com/modin-project/modin/issues/362
|
RayTaskError Traceback (most recent call last)
~/Downloads/kaggle5/kaggle5.py in <module>
51
52
---> 53 train_df.describe(include=['O'])
54
55 # In[9]:
~/software_builds/modin/modin/pandas/dataframe.py in describe(self, percentiles, include, exclude)
1093 return DataFrame(
1094 query_compiler=self._query_compiler.describe(
-> 1095 percentiles=percentiles, include=include, exclude=exclude
1096 )
1097 )
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in describe(self, **kwargs)
1593 func, 0, new_columns, False
1594 )
-> 1595 new_index = self.compute_index(0, new_data, False)
1596 if numeric:
1597 new_dtypes = pandas.Series(
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in compute_index(self, axis, data_object, compute_diff)
136 axis=axis,
137 index_func=lambda df: pandas_index_extraction(df, axis),
--> 138 old_blocks=old_blocks,
139 )
140 return index_obj[new_indices] if compute_diff else new_indices
~/software_builds/modin/modin/engines/base/block_partitions.py in get_indices(self, axis, index_func, old_blocks)
514 new_indices = (
515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
--> 516 if len(self._partitions_cache.T)
517 else []
518 )
~/software_builds/modin/modin/engines/base/block_partitions.py in <listcomp>(.0)
513 # DO NOT CHANGE TO self.partitions under any circumstance.
514 new_indices = (
--> 515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
516 if len(self._partitions_cache.T)
517 else []
~/software_builds/modin/modin/engines/ray/pandas_on_ray/remote_partition.py in get(self)
26 return self.apply(lambda x: x).get()
27
---> 28 return ray.get(self.oid)
29
30 def apply(self, func, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ray/worker.py in get(object_ids, worker)
2384 # here.
2385 last_task_error_raise_time = time.time()
-> 2386 raise value
2387 return value
2388
RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.remote_partition.deploy_ray_func() (pid=17340, host=iMac.local)
ray.worker.RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.axis_partition.deploy_ray_axis_func() (pid=17341, host=iMac.local)
File "/Users/DevinPetersohn/software_builds/modin/modin/engines/ray/pandas_on_ray/axis_partition.py", line 127, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 163, in helper
return pandas_func(df, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 1588, in describe_builder
return pandas.DataFrame.describe(df, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/generic.py", line 8574, in describe
data = self.select_dtypes(include=include, exclude=exclude)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/frame.py", line 3064, in select_dtypes
inc_ex=(include & exclude)))
ValueError: include and exclude overlap on frozenset({<class 'numpy.object_'>})
|
RayTaskError
|
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
if not is_list_like(include):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
|
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if exclude is None:
exclude = "object"
elif "object" not in include:
exclude = (
([exclude] + ["object"])
if isinstance(exclude, str)
else list(exclude) + ["object"]
)
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
|
https://github.com/modin-project/modin/issues/362
|
RayTaskError Traceback (most recent call last)
~/Downloads/kaggle5/kaggle5.py in <module>
51
52
---> 53 train_df.describe(include=['O'])
54
55 # In[9]:
~/software_builds/modin/modin/pandas/dataframe.py in describe(self, percentiles, include, exclude)
1093 return DataFrame(
1094 query_compiler=self._query_compiler.describe(
-> 1095 percentiles=percentiles, include=include, exclude=exclude
1096 )
1097 )
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in describe(self, **kwargs)
1593 func, 0, new_columns, False
1594 )
-> 1595 new_index = self.compute_index(0, new_data, False)
1596 if numeric:
1597 new_dtypes = pandas.Series(
~/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py in compute_index(self, axis, data_object, compute_diff)
136 axis=axis,
137 index_func=lambda df: pandas_index_extraction(df, axis),
--> 138 old_blocks=old_blocks,
139 )
140 return index_obj[new_indices] if compute_diff else new_indices
~/software_builds/modin/modin/engines/base/block_partitions.py in get_indices(self, axis, index_func, old_blocks)
514 new_indices = (
515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
--> 516 if len(self._partitions_cache.T)
517 else []
518 )
~/software_builds/modin/modin/engines/base/block_partitions.py in <listcomp>(.0)
513 # DO NOT CHANGE TO self.partitions under any circumstance.
514 new_indices = (
--> 515 [idx.apply(func).get() for idx in self._partitions_cache.T[0]]
516 if len(self._partitions_cache.T)
517 else []
~/software_builds/modin/modin/engines/ray/pandas_on_ray/remote_partition.py in get(self)
26 return self.apply(lambda x: x).get()
27
---> 28 return ray.get(self.oid)
29
30 def apply(self, func, **kwargs):
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/ray/worker.py in get(object_ids, worker)
2384 # here.
2385 last_task_error_raise_time = time.time()
-> 2386 raise value
2387 return value
2388
RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.remote_partition.deploy_ray_func() (pid=17340, host=iMac.local)
ray.worker.RayTaskError: ray_worker:modin.engines.ray.pandas_on_ray.axis_partition.deploy_ray_axis_func() (pid=17341, host=iMac.local)
File "/Users/DevinPetersohn/software_builds/modin/modin/engines/ray/pandas_on_ray/axis_partition.py", line 127, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 163, in helper
return pandas_func(df, **kwargs)
File "/Users/DevinPetersohn/software_builds/modin/modin/data_management/query_compiler/pandas_query_compiler.py", line 1588, in describe_builder
return pandas.DataFrame.describe(df, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/generic.py", line 8574, in describe
data = self.select_dtypes(include=include, exclude=exclude)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/pandas/core/frame.py", line 3064, in select_dtypes
inc_ex=(include & exclude)))
ValueError: include and exclude overlap on frozenset({<class 'numpy.object_'>})
|
RayTaskError
|
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
data_manager=self._data_manager.cummax(axis=axis, skipna=skipna, **kwargs)
)
|
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return DataFrame(
data_manager=self._data_manager.cummax(axis=axis, skipna=skipna, **kwargs)
)
|
https://github.com/modin-project/modin/issues/126
|
RayGetError: Could not get objectid ObjectID(01000000f0cc325805f5c83895ed0532827d52de). It was created by remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func which failed with:
Remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func failed with:
Traceback (most recent call last):
File "/Users/William/Documents/modin/modin/data_management/partitioning/axis_partition.py", line 188, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/William/Documents/modin/modin/data_management/data_manager.py", line 158, in helper
def helper(df, internal_indices=[]):
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 9661, in cum_func
result = accum_func(y, axis)
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 8829, in <lambda>
lambda y, axis: np.maximum.accumulate(y, axis), "max",
File "pandas/_libs/tslibs/timestamps.pyx", line 170, in pandas._libs.tslibs.timestamps._Timestamp.__richcmp__
TypeError: Cannot compare type 'Timestamp' with type 'float'
|
TypeError
|
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
data_manager=self._data_manager.cummin(axis=axis, skipna=skipna, **kwargs)
)
|
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return DataFrame(
data_manager=self._data_manager.cummin(axis=axis, skipna=skipna, **kwargs)
)
|
https://github.com/modin-project/modin/issues/126
|
RayGetError: Could not get objectid ObjectID(01000000f0cc325805f5c83895ed0532827d52de). It was created by remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func which failed with:
Remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func failed with:
Traceback (most recent call last):
File "/Users/William/Documents/modin/modin/data_management/partitioning/axis_partition.py", line 188, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/William/Documents/modin/modin/data_management/data_manager.py", line 158, in helper
def helper(df, internal_indices=[]):
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 9661, in cum_func
result = accum_func(y, axis)
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 8829, in <lambda>
lambda y, axis: np.maximum.accumulate(y, axis), "max",
File "pandas/_libs/tslibs/timestamps.pyx", line 170, in pandas._libs.tslibs.timestamps._Timestamp.__richcmp__
TypeError: Cannot compare type 'Timestamp' with type 'float'
|
TypeError
|
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
data_manager=self._data_manager.cumprod(axis=axis, skipna=skipna, **kwargs)
)
|
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return DataFrame(
data_manager=self._data_manager.cumprod(axis=axis, skipna=skipna, **kwargs)
)
|
https://github.com/modin-project/modin/issues/126
|
RayGetError: Could not get objectid ObjectID(01000000f0cc325805f5c83895ed0532827d52de). It was created by remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func which failed with:
Remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func failed with:
Traceback (most recent call last):
File "/Users/William/Documents/modin/modin/data_management/partitioning/axis_partition.py", line 188, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/William/Documents/modin/modin/data_management/data_manager.py", line 158, in helper
def helper(df, internal_indices=[]):
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 9661, in cum_func
result = accum_func(y, axis)
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 8829, in <lambda>
lambda y, axis: np.maximum.accumulate(y, axis), "max",
File "pandas/_libs/tslibs/timestamps.pyx", line 170, in pandas._libs.tslibs.timestamps._Timestamp.__richcmp__
TypeError: Cannot compare type 'Timestamp' with type 'float'
|
TypeError
|
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
data_manager=self._data_manager.cumsum(axis=axis, skipna=skipna, **kwargs)
)
|
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return DataFrame(
data_manager=self._data_manager.cumsum(axis=axis, skipna=skipna, **kwargs)
)
|
https://github.com/modin-project/modin/issues/126
|
RayGetError: Could not get objectid ObjectID(01000000f0cc325805f5c83895ed0532827d52de). It was created by remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func which failed with:
Remote function modin.data_management.partitioning.axis_partition.deploy_ray_axis_func failed with:
Traceback (most recent call last):
File "/Users/William/Documents/modin/modin/data_management/partitioning/axis_partition.py", line 188, in deploy_ray_axis_func
result = func(dataframe, **kwargs)
File "/Users/William/Documents/modin/modin/data_management/data_manager.py", line 158, in helper
def helper(df, internal_indices=[]):
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 9661, in cum_func
result = accum_func(y, axis)
File "/Users/William/Documents/modin/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 8829, in <lambda>
lambda y, axis: np.maximum.accumulate(y, axis), "max",
File "pandas/_libs/tslibs/timestamps.pyx", line 170, in pandas._libs.tslibs.timestamps._Timestamp.__richcmp__
TypeError: Cannot compare type 'Timestamp' with type 'float'
|
TypeError
|
def send_templated_mail(
template_name,
context,
recipients,
sender=None,
bcc=None,
fail_silently=False,
files=None,
extra_headers={},
):
"""
send_templated_mail() is a wrapper around Django's e-mail routines that
allows us to easily send multipart (text/plain & text/html) e-mails using
templates that are stored in the database. This lets the admin provide
both a text and a HTML template for each message.
template_name is the slug of the template to use for this message (see
models.EmailTemplate)
context is a dictionary to be used when rendering the template
recipients can be either a string, eg 'a@b.com', or a list of strings.
sender should contain a string, eg 'My Site <me@z.com>'. If you leave it
blank, it'll use settings.DEFAULT_FROM_EMAIL as a fallback.
bcc is an optional list of addresses that will receive this message as a
blind carbon copy.
fail_silently is passed to Django's mail routine. Set to 'True' to ignore
any errors at send time.
files can be a list of tuples. Each tuple should be a filename to attach,
along with the File objects to be read. files can be blank.
extra_headers is a dictionary of extra email headers, needed to process
email replies and keep proper threading.
"""
from django.core.mail import EmailMultiAlternatives
from django.template import engines
from_string = engines["django"].from_string
from helpdesk.models import EmailTemplate
from helpdesk.settings import (
HELPDESK_EMAIL_SUBJECT_TEMPLATE,
HELPDESK_EMAIL_FALLBACK_LOCALE,
)
locale = context["queue"].get("locale") or HELPDESK_EMAIL_FALLBACK_LOCALE
try:
t = EmailTemplate.objects.get(
template_name__iexact=template_name, locale=locale
)
except EmailTemplate.DoesNotExist:
try:
t = EmailTemplate.objects.get(
template_name__iexact=template_name, locale__isnull=True
)
except EmailTemplate.DoesNotExist:
logger.warning('template "%s" does not exist, no mail sent', template_name)
return # just ignore if template doesn't exist
subject_part = (
from_string(HELPDESK_EMAIL_SUBJECT_TEMPLATE % {"subject": t.subject})
.render(context)
.replace("\n", "")
.replace("\r", "")
)
footer_file = os.path.join("helpdesk", locale, "email_text_footer.txt")
text_part = from_string(
"%s{%% include '%s' %%}" % (t.plain_text, footer_file)
).render(context)
email_html_base_file = os.path.join("helpdesk", locale, "email_html_base.html")
# keep new lines in html emails
if "comment" in context:
context["comment"] = mark_safe(context["comment"].replace("\r\n", "<br>"))
html_part = from_string(
"{%% extends '%s' %%}{%% block title %%}"
"%s"
"{%% endblock %%}{%% block content %%}%s{%% endblock %%}"
% (email_html_base_file, t.heading, t.html)
).render(context)
if isinstance(recipients, str):
if recipients.find(","):
recipients = recipients.split(",")
elif type(recipients) != list:
recipients = [recipients]
msg = EmailMultiAlternatives(
subject_part,
text_part,
sender or settings.DEFAULT_FROM_EMAIL,
recipients,
bcc=bcc,
)
msg.attach_alternative(html_part, "text/html")
if files:
for filename, filefield in files:
filefield.open("rb")
content = filefield.read()
msg.attach(filename, content)
filefield.close()
logger.debug("Sending email to: {!r}".format(recipients))
try:
return msg.send()
except SMTPException as e:
logger.exception(
"SMTPException raised while sending email to {}".format(recipients)
)
if not fail_silently:
raise e
return 0
|
def send_templated_mail(
template_name,
context,
recipients,
sender=None,
bcc=None,
fail_silently=False,
files=None,
extra_headers={},
):
"""
send_templated_mail() is a wrapper around Django's e-mail routines that
allows us to easily send multipart (text/plain & text/html) e-mails using
templates that are stored in the database. This lets the admin provide
both a text and a HTML template for each message.
template_name is the slug of the template to use for this message (see
models.EmailTemplate)
context is a dictionary to be used when rendering the template
recipients can be either a string, eg 'a@b.com', or a list of strings.
sender should contain a string, eg 'My Site <me@z.com>'. If you leave it
blank, it'll use settings.DEFAULT_FROM_EMAIL as a fallback.
bcc is an optional list of addresses that will receive this message as a
blind carbon copy.
fail_silently is passed to Django's mail routine. Set to 'True' to ignore
any errors at send time.
files can be a list of tuples. Each tuple should be a filename to attach,
along with the File objects to be read. files can be blank.
extra_headers is a dictionary of extra email headers, needed to process
email replies and keep proper threading.
"""
from django.core.mail import EmailMultiAlternatives
from django.template import engines
from_string = engines["django"].from_string
from helpdesk.models import EmailTemplate
from helpdesk.settings import (
HELPDESK_EMAIL_SUBJECT_TEMPLATE,
HELPDESK_EMAIL_FALLBACK_LOCALE,
)
locale = context["queue"].get("locale") or HELPDESK_EMAIL_FALLBACK_LOCALE
try:
t = EmailTemplate.objects.get(
template_name__iexact=template_name, locale=locale
)
except EmailTemplate.DoesNotExist:
try:
t = EmailTemplate.objects.get(
template_name__iexact=template_name, locale__isnull=True
)
except EmailTemplate.DoesNotExist:
logger.warning('template "%s" does not exist, no mail sent', template_name)
return # just ignore if template doesn't exist
subject_part = (
from_string(HELPDESK_EMAIL_SUBJECT_TEMPLATE % {"subject": t.subject})
.render(context)
.replace("\n", "")
.replace("\r", "")
)
footer_file = os.path.join("helpdesk", locale, "email_text_footer.txt")
text_part = from_string(
"%s{%% include '%s' %%}" % (t.plain_text, footer_file)
).render(context)
email_html_base_file = os.path.join("helpdesk", locale, "email_html_base.html")
# keep new lines in html emails
if "comment" in context:
context["comment"] = mark_safe(context["comment"].replace("\r\n", "<br>"))
html_part = from_string(
"{%% extends '%s' %%}{%% block title %%}"
"%s"
"{%% endblock %%}{%% block content %%}%s{%% endblock %%}"
% (email_html_base_file, t.heading, t.html)
).render(context)
if isinstance(recipients, str):
if recipients.find(","):
recipients = recipients.split(",")
elif type(recipients) != list:
recipients = [recipients]
msg = EmailMultiAlternatives(
subject_part,
text_part,
sender or settings.DEFAULT_FROM_EMAIL,
recipients,
bcc=bcc,
)
msg.attach_alternative(html_part, "text/html")
if files:
for filename, filefield in files:
mime = mimetypes.guess_type(filename)
if mime[0] is not None and mime[0] == "text/plain":
with open(filefield.path, "r") as attachedfile:
content = attachedfile.read()
msg.attach(filename, content)
else:
msg.attach_file(filefield.path)
logger.debug("Sending email to: {!r}".format(recipients))
try:
return msg.send()
except SMTPException as e:
logger.exception(
"SMTPException raised while sending email to {}".format(recipients)
)
if not fail_silently:
raise e
return 0
|
https://github.com/django-helpdesk/django-helpdesk/issues/721
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/apps/project/.local/lib/python2.7/site-packages/helpdesk/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/opt/apps/project/.local/lib/python2.7/site-packages/helpdesk/views/public.py", line 51, in homepage
ticket = form.save()
File "/opt/apps/project/.local/lib/python2.7/site-packages/helpdesk/forms.py", line 399, in save
files=files)
File "/opt/apps/project/.local/lib/python2.7/site-packages/helpdesk/forms.py", line 250, in _send_messages
files=files,
File "/opt/apps/project/.local/lib/python2.7/site-packages/helpdesk/lib.py", line 135, in send_templated_mail
with open(filefield.path, 'rb') as attachedfile:
File "/usr/local/lib/python2.7/dist-packages/django/db/models/fields/files.py", line 65, in path
return self.storage.path(self.name)
File "/usr/local/lib/python2.7/dist-packages/django/core/files/storage.py", line 111, in path
raise NotImplementedError("This backend doesn't support absolute paths.")
NotImplementedError: This backend doesn't support absolute paths.
|
NotImplementedError
|
def ticket_from_message(message, queue, logger):
# 'message' must be an RFC822 formatted message.
message = (
email.message_from_string(message)
if six.PY3
else email.message_from_string(message.encode("utf-8"))
)
subject = message.get("subject", _("Comment from e-mail"))
subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
for affix in STRIPPED_SUBJECT_STRINGS:
subject = subject.replace(affix, "")
subject = subject.strip()
sender = message.get("from", _("Unknown Sender"))
sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
sender_email = email.utils.parseaddr(sender)[1]
cc = message.get_all("cc", None)
if cc:
# first, fixup the encoding if necessary
cc = [decode_mail_headers(decodeUnknown(message.get_charset(), x)) for x in cc]
# get_all checks if multiple CC headers, but individual emails may be comma separated too
tempcc = []
for hdr in cc:
tempcc.extend(hdr.split(","))
# use a set to ensure no duplicates
cc = set([x.strip() for x in tempcc])
for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
if ignore.test(sender_email):
if ignore.keep_in_mailbox:
# By returning 'False' the message will be kept in the mailbox,
# and the 'True' will cause the message to be deleted.
return False
return True
matchobj = re.match(r".*\[" + queue.slug + "-(?P<id>\d+)\]", subject)
if matchobj:
# This is a reply or forward.
ticket = matchobj.group("id")
logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
else:
logger.info("No tracking ID matched.")
ticket = None
body = None
counter = 0
files = []
for part in message.walk():
if part.get_content_maintype() == "multipart":
continue
name = part.get_param("name")
if name:
name = email.utils.collapse_rfc2231_value(name)
if part.get_content_maintype() == "text" and name is None:
if part.get_content_subtype() == "plain":
body = EmailReplyParser.parse_reply(
decodeUnknown(
part.get_content_charset(), part.get_payload(decode=True)
)
)
# workaround to get unicode text out rather than escaped text
try:
body = body.encode("ascii").decode("unicode_escape")
except UnicodeEncodeError:
body.encode("utf-8")
logger.debug("Discovered plain text MIME part")
else:
files.append(
SimpleUploadedFile(
_("email_html_body.html"),
encoding.smart_bytes(part.get_payload()),
"text/html",
)
)
logger.debug("Discovered HTML MIME part")
else:
if not name:
ext = mimetypes.guess_extension(part.get_content_type())
name = "part-%i%s" % (counter, ext)
payload = part.get_payload()
if isinstance(payload, list):
payload = payload.pop().as_string()
payloadToWrite = payload
# check version of python to ensure use of only the correct error type
if six.PY2:
non_b64_err = binascii.Error
else:
non_b64_err = TypeError
try:
logger.debug("Try to base64 decode the attachment payload")
payloadToWrite = base64.decodestring(payload)
except non_b64_err:
logger.debug("Payload was not base64 encoded, using raw bytes")
payloadToWrite = payload
files.append(
SimpleUploadedFile(
name, part.get_payload(decode=True), mimetypes.guess_type(name)[0]
)
)
logger.debug("Found MIME attachment %s" % name)
counter += 1
if not body:
mail = BeautifulSoup(part.get_payload(), "lxml")
if ">" in mail.text:
message_body = mail.text.split(">")[1]
body = message_body.encode("ascii", errors="ignore")
else:
body = mail.text
if ticket:
try:
t = Ticket.objects.get(id=ticket)
except Ticket.DoesNotExist:
logger.info(
"Tracking ID %s-%s not associated with existing ticket. Creating new ticket."
% (queue.slug, ticket)
)
ticket = None
else:
logger.info(
"Found existing ticket with Tracking ID %s-%s" % (t.queue.slug, t.id)
)
if t.status == Ticket.CLOSED_STATUS:
t.status = Ticket.REOPENED_STATUS
t.save()
new = False
smtp_priority = message.get("priority", "")
smtp_importance = message.get("importance", "")
high_priority_types = {"high", "important", "1", "urgent"}
priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3
if ticket is None:
if settings.QUEUE_EMAIL_BOX_UPDATE_ONLY:
return None
new = True
t = Ticket.objects.create(
title=subject,
queue=queue,
submitter_email=sender_email,
created=timezone.now(),
description=body,
priority=priority,
)
logger.debug("Created new ticket %s-%s" % (t.queue.slug, t.id))
if cc:
# get list of currently CC'd emails
current_cc = TicketCC.objects.filter(ticket=ticket)
current_cc_emails = [x.email for x in current_cc if x.email]
# get emails of any Users CC'd to email, if defined
# (some Users may not have an associated email, e.g, when using LDAP)
current_cc_users = [x.user.email for x in current_cc if x.user and x.user.email]
# ensure submitter, assigned user, queue email not added
other_emails = [queue.email_address]
if t.submitter_email:
other_emails.append(t.submitter_email)
if t.assigned_to:
other_emails.append(t.assigned_to.email)
current_cc = set(current_cc_emails + current_cc_users + other_emails)
# first, add any User not previously CC'd (as identified by User's email)
all_users = User.objects.all()
all_user_emails = set([x.email for x in all_users])
users_not_currently_ccd = all_user_emails.difference(set(current_cc))
users_to_cc = cc.intersection(users_not_currently_ccd)
for user in users_to_cc:
tcc = TicketCC.objects.create(
ticket=t,
user=User.objects.get(email=user),
can_view=True,
can_update=False,
)
tcc.save()
# then add remaining emails alphabetically, makes testing easy
new_cc = cc.difference(current_cc).difference(all_user_emails)
new_cc = sorted(list(new_cc))
for ccemail in new_cc:
tcc = TicketCC.objects.create(
ticket=t, email=ccemail, can_view=True, can_update=False
)
tcc.save()
f = FollowUp(
ticket=t,
title=_(
"E-Mail Received from %(sender_email)s" % {"sender_email": sender_email}
),
date=timezone.now(),
public=True,
comment=body,
)
if t.status == Ticket.REOPENED_STATUS:
f.new_status = Ticket.REOPENED_STATUS
f.title = _(
"Ticket Re-Opened by E-Mail Received from %(sender_email)s"
% {"sender_email": sender_email}
)
f.save()
logger.debug("Created new FollowUp for Ticket")
if six.PY2:
logger.info(
(
"[%s-%s] %s"
% (
t.queue.slug,
t.id,
t.title,
)
).encode("ascii", "replace")
)
elif six.PY3:
logger.info(
"[%s-%s] %s"
% (
t.queue.slug,
t.id,
t.title,
)
)
attached = process_attachments(f, files)
for att_file in attached:
logger.info(
"Attachment '%s' (with size %s) successfully added to ticket from email."
% (att_file[0], att_file[1].size)
)
context = safe_template_context(t)
if new:
if sender_email:
send_templated_mail(
"newticket_submitter",
context,
recipients=sender_email,
sender=queue.from_address,
fail_silently=True,
)
if queue.new_ticket_cc:
send_templated_mail(
"newticket_cc",
context,
recipients=queue.new_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
send_templated_mail(
"newticket_cc",
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
else:
context.update(comment=f.comment)
if t.assigned_to:
send_templated_mail(
"updated_owner",
context,
recipients=t.assigned_to.email,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc:
send_templated_mail(
"updated_cc",
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
return t
|
def ticket_from_message(message, queue, logger):
# 'message' must be an RFC822 formatted message.
message = (
email.message_from_string(message)
if six.PY3
else email.message_from_string(message.encode("utf-8"))
)
subject = message.get("subject", _("Comment from e-mail"))
subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
for affix in STRIPPED_SUBJECT_STRINGS:
subject = subject.replace(affix, "")
subject = subject.strip()
sender = message.get("from", _("Unknown Sender"))
sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
sender_email = email.utils.parseaddr(sender)[1]
cc = message.get_all("cc", None)
if cc:
# first, fixup the encoding if necessary
cc = [decode_mail_headers(decodeUnknown(message.get_charset(), x)) for x in cc]
# get_all checks if multiple CC headers, but individual emails may be comma separated too
tempcc = []
for hdr in cc:
tempcc.extend(hdr.split(","))
# use a set to ensure no duplicates
cc = set([x.strip() for x in tempcc])
for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
if ignore.test(sender_email):
if ignore.keep_in_mailbox:
# By returning 'False' the message will be kept in the mailbox,
# and the 'True' will cause the message to be deleted.
return False
return True
matchobj = re.match(r".*\[" + queue.slug + "-(?P<id>\d+)\]", subject)
if matchobj:
# This is a reply or forward.
ticket = matchobj.group("id")
logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
else:
logger.info("No tracking ID matched.")
ticket = None
body = None
counter = 0
files = []
for part in message.walk():
if part.get_content_maintype() == "multipart":
continue
name = part.get_param("name")
if name:
name = email.utils.collapse_rfc2231_value(name)
if part.get_content_maintype() == "text" and name is None:
if part.get_content_subtype() == "plain":
body = EmailReplyParser.parse_reply(
decodeUnknown(
part.get_content_charset(), part.get_payload(decode=True)
)
)
# workaround to get unicode text out rather than escaped text
try:
body = body.encode("ascii").decode("unicode_escape")
except UnicodeEncodeError:
body.encode("utf-8")
logger.debug("Discovered plain text MIME part")
else:
files.append(
SimpleUploadedFile(
_("email_html_body.html"),
encoding.smart_bytes(part.get_payload()),
"text/html",
)
)
logger.debug("Discovered HTML MIME part")
else:
if not name:
ext = mimetypes.guess_extension(part.get_content_type())
name = "part-%i%s" % (counter, ext)
payload = part.get_payload()
if isinstance(payload, list):
payload = payload.pop().as_string()
payloadToWrite = payload
try:
logger.debug("Try to base64 decode the attachment payload")
payloadToWrite = base64.decodestring(payload)
except (binascii.Error, TypeError):
logger.debug("Payload was not base64 encoded, using raw bytes")
payloadToWrite = payload
files.append(
SimpleUploadedFile(
name, part.get_payload(decode=True), mimetypes.guess_type(name)[0]
)
)
logger.debug("Found MIME attachment %s" % name)
counter += 1
if not body:
mail = BeautifulSoup(part.get_payload(), "lxml")
if ">" in mail.text:
message_body = mail.text.split(">")[1]
body = message_body.encode("ascii", errors="ignore")
else:
body = mail.text
if ticket:
try:
t = Ticket.objects.get(id=ticket)
except Ticket.DoesNotExist:
logger.info(
"Tracking ID %s-%s not associated with existing ticket. Creating new ticket."
% (queue.slug, ticket)
)
ticket = None
else:
logger.info(
"Found existing ticket with Tracking ID %s-%s" % (t.queue.slug, t.id)
)
if t.status == Ticket.CLOSED_STATUS:
t.status = Ticket.REOPENED_STATUS
t.save()
new = False
smtp_priority = message.get("priority", "")
smtp_importance = message.get("importance", "")
high_priority_types = {"high", "important", "1", "urgent"}
priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3
if ticket is None:
if settings.QUEUE_EMAIL_BOX_UPDATE_ONLY:
return None
new = True
t = Ticket.objects.create(
title=subject,
queue=queue,
submitter_email=sender_email,
created=timezone.now(),
description=body,
priority=priority,
)
logger.debug("Created new ticket %s-%s" % (t.queue.slug, t.id))
if cc:
# get list of currently CC'd emails
current_cc = TicketCC.objects.filter(ticket=ticket)
current_cc_emails = [x.email for x in current_cc if x.email]
# get emails of any Users CC'd to email, if defined
# (some Users may not have an associated email, e.g, when using LDAP)
current_cc_users = [x.user.email for x in current_cc if x.user and x.user.email]
# ensure submitter, assigned user, queue email not added
other_emails = [queue.email_address]
if t.submitter_email:
other_emails.append(t.submitter_email)
if t.assigned_to:
other_emails.append(t.assigned_to.email)
current_cc = set(current_cc_emails + current_cc_users + other_emails)
# first, add any User not previously CC'd (as identified by User's email)
all_users = User.objects.all()
all_user_emails = set([x.email for x in all_users])
users_not_currently_ccd = all_user_emails.difference(set(current_cc))
users_to_cc = cc.intersection(users_not_currently_ccd)
for user in users_to_cc:
tcc = TicketCC.objects.create(
ticket=t,
user=User.objects.get(email=user),
can_view=True,
can_update=False,
)
tcc.save()
# then add remaining emails alphabetically, makes testing easy
new_cc = cc.difference(current_cc).difference(all_user_emails)
new_cc = sorted(list(new_cc))
for ccemail in new_cc:
tcc = TicketCC.objects.create(
ticket=t, email=ccemail, can_view=True, can_update=False
)
tcc.save()
f = FollowUp(
ticket=t,
title=_(
"E-Mail Received from %(sender_email)s" % {"sender_email": sender_email}
),
date=timezone.now(),
public=True,
comment=body,
)
if t.status == Ticket.REOPENED_STATUS:
f.new_status = Ticket.REOPENED_STATUS
f.title = _(
"Ticket Re-Opened by E-Mail Received from %(sender_email)s"
% {"sender_email": sender_email}
)
f.save()
logger.debug("Created new FollowUp for Ticket")
if six.PY2:
logger.info(
(
"[%s-%s] %s"
% (
t.queue.slug,
t.id,
t.title,
)
).encode("ascii", "replace")
)
elif six.PY3:
logger.info(
"[%s-%s] %s"
% (
t.queue.slug,
t.id,
t.title,
)
)
attached = process_attachments(f, files)
for att_file in attached:
logger.info(
"Attachment '%s' (with size %s) successfully added to ticket from email."
% (att_file[0], att_file[1].size)
)
context = safe_template_context(t)
if new:
if sender_email:
send_templated_mail(
"newticket_submitter",
context,
recipients=sender_email,
sender=queue.from_address,
fail_silently=True,
)
if queue.new_ticket_cc:
send_templated_mail(
"newticket_cc",
context,
recipients=queue.new_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
send_templated_mail(
"newticket_cc",
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
else:
context.update(comment=f.comment)
if t.assigned_to:
send_templated_mail(
"updated_owner",
context,
recipients=t.assigned_to.email,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc:
send_templated_mail(
"updated_cc",
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
return t
|
https://github.com/django-helpdesk/django-helpdesk/issues/567
|
./manage.sh get_email
***** Sat Nov 4 13:25:02 2017: Begin processing mail for django-helpdesk
Attempting IMAP server login
Received 1 messages from IMAP server
Processing message b'1'
No tracking ID matched.
Traceback (most recent call last):
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/base64.py", line 517, in _input_type_check
m = memoryview(s)
TypeError: memoryview: a bytes-like object is required, not 'str'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/chtis/src/monkeypants/goodies-taskdesk/src/manage.py", line 22, in <module>
execute_from_command_line(sys.argv)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/helpdesk/management/commands/get_email.py", line 73, in handle
process_email(quiet=quiet)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/helpdesk/management/commands/get_email.py", line 106, in process_email
process_queue(q, logger=logger)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/helpdesk/management/commands/get_email.py", line 209, in process_queue
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/site-packages/helpdesk/management/commands/get_email.py", line 348, in ticket_from_message
payloadToWrite = base64.decodestring(payload)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/base64.py", line 561, in decodestring
return decodebytes(s)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/base64.py", line 552, in decodebytes
_input_type_check(s)
File "/home/chtis/src/monkeypants/goodies-taskdesk/.venv/lib/python3.6/base64.py", line 520, in _input_type_check
raise TypeError(msg) from err
TypeError: expected bytes-like object, not str
|
TypeError
|
def http_get(self, path, query_data={}, streamed=False, raw=False, **kwargs):
"""Make a GET request to the Gitlab server.
Args:
path (str): Path or full URL to query ('/projects' or
'http://whatever/v4/api/projecs')
query_data (dict): Data to send as query parameters
streamed (bool): Whether the data should be streamed
raw (bool): If True do not try to parse the output as json
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
A requests result object is streamed is True or the content type is
not json.
The parsed json data otherwise.
Raises:
GitlabHttpError: When the return code is not 2xx
GitlabParsingError: If the json data could not be parsed
"""
result = self.http_request(
"get", path, query_data=query_data, streamed=streamed, **kwargs
)
if (
result.headers["Content-Type"] == "application/json"
and not streamed
and not raw
):
try:
return result.json()
except Exception:
raise GitlabParsingError(error_message="Failed to parse the server message")
else:
return result
|
def http_get(self, path, query_data={}, streamed=False, **kwargs):
"""Make a GET request to the Gitlab server.
Args:
path (str): Path or full URL to query ('/projects' or
'http://whatever/v4/api/projecs')
query_data (dict): Data to send as query parameters
streamed (bool): Whether the data should be streamed
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
A requests result object is streamed is True or the content type is
not json.
The parsed json data otherwise.
Raises:
GitlabHttpError: When the return code is not 2xx
GitlabParsingError: If the json data could not be parsed
"""
result = self.http_request(
"get", path, query_data=query_data, streamed=streamed, **kwargs
)
if result.headers["Content-Type"] == "application/json" and not streamed:
try:
return result.json()
except Exception:
raise GitlabParsingError(error_message="Failed to parse the server message")
else:
return result
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def content(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return the content of a snippet.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment.
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the content could not be retrieved
Returns:
str: The snippet content
"""
path = "/snippets/%s/raw" % self.get_id()
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def content(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return the content of a snippet.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment.
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the content could not be retrieved
Returns:
str: The snippet content
"""
path = "/snippets/%s/raw" % self.get_id()
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def artifacts(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get the job artifacts.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The artifacts if `streamed` is False, None otherwise.
"""
path = "%s/%s/artifacts" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def artifacts(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get the job artifacts.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The artifacts if `streamed` is False, None otherwise.
"""
path = "%s/%s/artifacts" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def artifact(self, path, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get a single artifact file from within the job's artifacts archive.
Args:
path (str): Path of the artifact
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The artifacts if `streamed` is False, None otherwise.
"""
path = "%s/%s/artifacts/%s" % (self.manager.path, self.get_id(), path)
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def artifact(self, path, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get a single artifact file from within the job's artifacts archive.
Args:
path (str): Path of the artifact
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The artifacts if `streamed` is False, None otherwise.
"""
path = "%s/%s/artifacts/%s" % (self.manager.path, self.get_id(), path)
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def trace(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get the job trace.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The trace
"""
path = "%s/%s/trace" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def trace(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Get the job trace.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the artifacts could not be retrieved
Returns:
str: The trace
"""
path = "%s/%s/trace" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def raw(self, file_path, ref, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return the content of a file for a commit.
Args:
ref (str): ID of the commit
filepath (str): Path of the file to return
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the file could not be retrieved
Returns:
str: The file content
"""
file_path = file_path.replace("/", "%2F").replace(".", "%2E")
path = "%s/%s/raw" % (self.path, file_path)
query_data = {"ref": ref}
result = self.gitlab.http_get(
path, query_data=query_data, streamed=streamed, raw=True, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
|
def raw(self, file_path, ref, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return the content of a file for a commit.
Args:
ref (str): ID of the commit
filepath (str): Path of the file to return
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the file could not be retrieved
Returns:
str: The file content
"""
file_path = file_path.replace("/", "%2F").replace(".", "%2E")
path = "%s/%s/raw" % (self.path, file_path)
query_data = {"ref": ref}
result = self.gitlab.http_get(
path, query_data=query_data, streamed=streamed, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def content(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return the content of a snippet.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment.
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the content could not be retrieved
Returns:
str: The snippet content
"""
path = "%s/%s/raw" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def content(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return the content of a snippet.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment.
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the content could not be retrieved
Returns:
str: The snippet content
"""
path = "%s/%s/raw" % (self.manager.path, self.get_id())
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def download(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Download the archive of a project export.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
reatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise
"""
path = "/projects/%d/export/download" % self.project_id
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def download(self, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Download the archive of a project export.
Args:
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
reatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise
"""
path = "/projects/%d/export/download" % self.project_id
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def repository_raw_blob(
self, sha, streamed=False, action=None, chunk_size=1024, **kwargs
):
"""Return the raw file contents for a blob.
Args:
sha(str): ID of the blob
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise
"""
path = "/projects/%s/repository/blobs/%s/raw" % (self.get_id(), sha)
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def repository_raw_blob(
self, sha, streamed=False, action=None, chunk_size=1024, **kwargs
):
"""Return the raw file contents for a blob.
Args:
sha(str): ID of the blob
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
str: The blob content if streamed is False, None otherwise
"""
path = "/projects/%s/repository/blobs/%s/raw" % (self.get_id(), sha)
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def repository_archive(
self, sha=None, streamed=False, action=None, chunk_size=1024, **kwargs
):
"""Return a tarball of the repository.
Args:
sha (str): ID of the commit (default branch by default)
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server failed to perform the request
Returns:
str: The binary data of the archive
"""
path = "/projects/%s/repository/archive" % self.get_id()
query_data = {}
if sha:
query_data["sha"] = sha
result = self.manager.gitlab.http_get(
path, query_data=query_data, raw=True, streamed=streamed, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
|
def repository_archive(
self, sha=None, streamed=False, action=None, chunk_size=1024, **kwargs
):
"""Return a tarball of the repository.
Args:
sha (str): ID of the commit (default branch by default)
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server failed to perform the request
Returns:
str: The binary data of the archive
"""
path = "/projects/%s/repository/archive" % self.get_id()
query_data = {}
if sha:
query_data["sha"] = sha
result = self.manager.gitlab.http_get(
path, query_data=query_data, streamed=streamed, **kwargs
)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def snapshot(self, wiki=False, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return a snapshot of the repository.
Args:
wiki (bool): If True return the wiki repository
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment.
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the content could not be retrieved
Returns:
str: The uncompressed tar archive of the repository
"""
path = "/projects/%d/snapshot" % self.get_id()
result = self.manager.gitlab.http_get(path, streamed=streamed, raw=True, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
def snapshot(self, wiki=False, streamed=False, action=None, chunk_size=1024, **kwargs):
"""Return a snapshot of the repository.
Args:
wiki (bool): If True return the wiki repository
streamed (bool): If True the data will be processed by chunks of
`chunk_size` and each chunk is passed to `action` for
treatment.
action (callable): Callable responsible of dealing with chunk of
data
chunk_size (int): Size of each chunk
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the content could not be retrieved
Returns:
str: The uncompressed tar archive of the repository
"""
path = "/projects/%d/snapshot" % self.get_id()
result = self.manager.gitlab.http_get(path, streamed=streamed, **kwargs)
return utils.response_content(result, streamed, action, chunk_size)
|
https://github.com/python-gitlab/python-gitlab/issues/683
|
(venv) ╭─user ~/Repositories/ExampleRepo ‹feature/ci-ci-testing*›
╰─$ python gitlab_artifact.py 1 ↵
Traceback (most recent call last):
File "gitlab_artifact.py", line 60, in <module>
main()
File "gitlab_artifact.py", line 52, in main
a = most_recent_job.artifact("ExampleRepo/cached_service_specs.json")
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/cli.py", line 43, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/exceptions.py", line 251, in wrapped_f
return f(*args, **kwargs)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/v4/objects.py", line 1397, in artifact
return utils.response_content(result, streamed, action, chunk_size)
File "/Users/lknecht/Repositories/ExampleRepo/venv/lib/python3.7/site-packages/gitlab/utils.py", line 28, in response_content
return response.content
AttributeError: 'list' object has no attribute 'content'
|
AttributeError
|
def __enter__(self):
"""
Performs some basic checks and returns itself when everything is ready to invoke a Lambda function.
:returns InvokeContext: Returns this object
"""
# Grab template from file and create a provider
self._template_dict = self._get_template_data(self._template_file)
self._function_provider = SamFunctionProvider(
self._template_dict, self.parameter_overrides
)
self._env_vars_value = self._get_env_vars_value(self._env_vars_file)
self._log_file_handle = self._setup_log_file(self._log_file)
self._debug_context = self._get_debug_context(
self._debug_ports, self._debug_args, self._debugger_path
)
self._container_manager = self._get_container_manager(
self._docker_network, self._skip_pull_image
)
if not self._container_manager.is_docker_reachable:
raise InvokeContextException(
"Running AWS SAM projects locally requires Docker. Have you got it installed and running?"
)
return self
|
def __enter__(self):
"""
Performs some basic checks and returns itself when everything is ready to invoke a Lambda function.
:returns InvokeContext: Returns this object
"""
# Grab template from file and create a provider
self._template_dict = self._get_template_data(self._template_file)
self._function_provider = SamFunctionProvider(
self._template_dict, self.parameter_overrides
)
self._env_vars_value = self._get_env_vars_value(self._env_vars_file)
self._log_file_handle = self._setup_log_file(self._log_file)
self._debug_context = self._get_debug_context(
self._debug_ports, self._debug_args, self._debugger_path
)
self._container_manager = self._get_container_manager(
self._docker_network, self._skip_pull_image
)
if not self._container_manager.is_docker_reachable:
raise InvokeContextException(
"Running AWS SAM projects locally requires Docker. Have you got it installed?"
)
return self
|
https://github.com/aws/aws-sam-cli/issues/1392
|
2019-08-30 16:23:41 Starting Build inside a container
2019-08-30 16:23:46 Building resource 'Function'
Traceback (most recent call last):
File "runpy.py", line 193, in _run_module_as_main
File "runpy.py", line 85, in _run_code
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\__main__.py", line 12, in <module>
cli(prog_name="sam")
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 697, in main
rv = self.invoke(ctx)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 94, in wrapped
raise exception # pylint: disable=raising-bad-type
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 65, in wrapped
return_value = func(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 105, in cli
skip_pull_image, parameter_overrides, mode) # pragma: no cover
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 150, in do_cli
artifacts = builder.build()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 108, in build
lambda_function.runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 199, in _build_function
runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 234, in _build_function_on_container
if not self._container_manager.is_docker_reachable:
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\local\docker\manager.py", line 50, in is_docker_reachable
self.docker_client.ping()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\client.py", line 187, in ping
return self.api.ping(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\daemon.py", line 166, in ping
return self._result(self._get(self._url('/_ping'))) == 'OK'
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\utils\decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\client.py", line 230, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\adapters.py", line 449, in send
timeout=timeout
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 603, in urlopen
chunked=chunked)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 355, in _make_request
conn.request(method, url, **httplib_request_kw)
File "http\client.py", line 1239, in request
File "http\client.py", line 1285, in _send_request
File "http\client.py", line 1234, in endheaders
File "http\client.py", line 1026, in _send_output
File "http\client.py", line 964, in send
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipeconn.py", line 32, in connect
sock.connect(self.npipe_path)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 22, in wrapped
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 50, in connect
win32pipe.WaitNamedPipe(address, self._timeout)
pywintypes.error: (2, 'WaitNamedPipe', 'The system cannot find the file specified.')
|
pywintypes.error
|
def is_docker_reachable(self):
"""
Checks if Docker daemon is running. This is required for us to invoke the function locally
Returns
-------
bool
True, if Docker is available, False otherwise
"""
errors = (
docker.errors.APIError,
requests.exceptions.ConnectionError,
)
if platform.system() == "Windows":
import pywintypes # pylint: disable=import-error
errors += (pywintypes.error,) # pylint: disable=no-member
try:
self.docker_client.ping()
return True
# When Docker is not installed, a request.exceptions.ConnectionError is thrown.
# and also windows-specific errors
except errors:
LOG.debug("Docker is not reachable", exc_info=True)
return False
|
def is_docker_reachable(self):
"""
Checks if Docker daemon is running. This is required for us to invoke the function locally
Returns
-------
bool
True, if Docker is available, False otherwise
"""
try:
self.docker_client.ping()
return True
# When Docker is not installed, a request.exceptions.ConnectionError is thrown.
except (docker.errors.APIError, requests.exceptions.ConnectionError):
LOG.debug("Docker is not reachable", exc_info=True)
return False
|
https://github.com/aws/aws-sam-cli/issues/1392
|
2019-08-30 16:23:41 Starting Build inside a container
2019-08-30 16:23:46 Building resource 'Function'
Traceback (most recent call last):
File "runpy.py", line 193, in _run_module_as_main
File "runpy.py", line 85, in _run_code
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\__main__.py", line 12, in <module>
cli(prog_name="sam")
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 697, in main
rv = self.invoke(ctx)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 94, in wrapped
raise exception # pylint: disable=raising-bad-type
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\telemetry\metrics.py", line 65, in wrapped
return_value = func(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 105, in cli
skip_pull_image, parameter_overrides, mode) # pragma: no cover
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\commands\build\command.py", line 150, in do_cli
artifacts = builder.build()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 108, in build
lambda_function.runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 199, in _build_function
runtime)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\lib\build\app_builder.py", line 234, in _build_function_on_container
if not self._container_manager.is_docker_reachable:
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\samcli\local\docker\manager.py", line 50, in is_docker_reachable
self.docker_client.ping()
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\client.py", line 187, in ping
return self.api.ping(*args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\daemon.py", line 166, in ping
return self._result(self._get(self._url('/_ping'))) == 'OK'
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\utils\decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\api\client.py", line 230, in _get
return self.get(url, **self._set_request_timeout(kwargs))
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 546, in get
return self.request('GET', url, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\requests\adapters.py", line 449, in send
timeout=timeout
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 603, in urlopen
chunked=chunked)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\urllib3\connectionpool.py", line 355, in _make_request
conn.request(method, url, **httplib_request_kw)
File "http\client.py", line 1239, in request
File "http\client.py", line 1285, in _send_request
File "http\client.py", line 1234, in endheaders
File "http\client.py", line 1026, in _send_output
File "http\client.py", line 964, in send
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipeconn.py", line 32, in connect
sock.connect(self.npipe_path)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 22, in wrapped
return f(self, *args, **kwargs)
File "C:\Program Files\Amazon\AWSSAMCLI\runtime\lib\site-packages\docker\transport\npipesocket.py", line 50, in connect
win32pipe.WaitNamedPipe(address, self._timeout)
pywintypes.error: (2, 'WaitNamedPipe', 'The system cannot find the file specified.')
|
pywintypes.error
|
def _convert_sam_function_resource(name, resource_properties, layers):
"""
Converts a AWS::Serverless::Function resource to a Function configuration usable by the provider.
:param string name: LogicalID of the resource NOTE: This is *not* the function name because not all functions
declare a name
:param dict resource_properties: Properties of this resource
:return samcli.commands.local.lib.provider.Function: Function configuration
"""
codeuri = SamFunctionProvider._extract_sam_function_codeuri(
name, resource_properties, "CodeUri"
)
LOG.debug(
"Found Serverless function with name='%s' and CodeUri='%s'", name, codeuri
)
timeout = resource_properties.get("Timeout")
if isinstance(timeout, str):
try:
timeout = ast.literal_eval(timeout)
except ValueError:
raise InvalidSamTemplateException(
"Invalid Number for Timeout: {}".format(timeout)
)
return Function(
name=name,
runtime=resource_properties.get("Runtime"),
memory=resource_properties.get("MemorySize"),
timeout=timeout,
handler=resource_properties.get("Handler"),
codeuri=codeuri,
environment=resource_properties.get("Environment"),
rolearn=resource_properties.get("Role"),
layers=layers,
)
|
def _convert_sam_function_resource(name, resource_properties, layers):
"""
Converts a AWS::Serverless::Function resource to a Function configuration usable by the provider.
:param string name: LogicalID of the resource NOTE: This is *not* the function name because not all functions
declare a name
:param dict resource_properties: Properties of this resource
:return samcli.commands.local.lib.provider.Function: Function configuration
"""
codeuri = SamFunctionProvider._extract_sam_function_codeuri(
name, resource_properties, "CodeUri"
)
LOG.debug(
"Found Serverless function with name='%s' and CodeUri='%s'", name, codeuri
)
return Function(
name=name,
runtime=resource_properties.get("Runtime"),
memory=resource_properties.get("MemorySize"),
timeout=resource_properties.get("Timeout"),
handler=resource_properties.get("Handler"),
codeuri=codeuri,
environment=resource_properties.get("Environment"),
rolearn=resource_properties.get("Role"),
layers=layers,
)
|
https://github.com/aws/aws-sam-cli/issues/925
|
2019-01-11 00:29:55 Found one Lambda function with name 'provisionAppSlaves'
2019-01-11 00:29:55 Found one Lambda function with name 'provisionSeleniumStandalones'
2019-01-11 00:29:55 Invoking index.provisionAppSlaves (nodejs8.10)
2019-01-11 00:29:55 Invoking index.provisionSeleniumStandalones (nodejs8.10)
2019-01-11 00:29:55 Environment variables overrides data is standard format
2019-01-11 00:29:55 Environment variables overrides data is standard format
2019-01-11 00:29:55 Loading AWS credentials from session with profile 'None'
2019-01-11 00:29:55 Loading AWS credentials from session with profile 'None'
2019-01-11 00:29:55 Changing event name from creating-client-class.iot-data to creating-client-class.iot-data-plane
2019-01-11 00:29:55 Changing event name from creating-client-class.iot-data to creating-client-class.iot-data-plane
2019-01-11 00:29:55 Changing event name from before-call.apigateway to before-call.api-gateway
2019-01-11 00:29:55 Changing event name from before-call.apigateway to before-call.api-gateway
2019-01-11 00:29:55 Changing event name from request-created.machinelearning.Predict to request-created.machine-learning.Predict
2019-01-11 00:29:55 Changing event name from request-created.machinelearning.Predict to request-created.machine-learning.Predict
2019-01-11 00:29:55 Changing event name from before-parameter-build.autoscaling.CreateLaunchConfiguration to before-parameter-build.auto-scaling.CreateLaunchConfiguration
2019-01-11 00:29:55 Changing event name from before-parameter-build.autoscaling.CreateLaunchConfiguration to before-parameter-build.auto-scaling.CreateLaunchConfiguration
2019-01-11 00:29:55 Changing event name from before-parameter-build.route53 to before-parameter-build.route-53
2019-01-11 00:29:55 Changing event name from before-parameter-build.route53 to before-parameter-build.route-53
2019-01-11 00:29:55 Changing event name from request-created.cloudsearchdomain.Search to request-created.cloudsearch-domain.Search
2019-01-11 00:29:55 Changing event name from request-created.cloudsearchdomain.Search to request-created.cloudsearch-domain.Search
2019-01-11 00:29:55 Changing event name from docs.*.autoscaling.CreateLaunchConfiguration.complete-section to docs.*.auto-scaling.CreateLaunchConfiguration.complete-section
2019-01-11 00:29:55 Changing event name from docs.*.autoscaling.CreateLaunchConfiguration.complete-section to docs.*.auto-scaling.CreateLaunchConfiguration.complete-section
2019-01-11 00:29:55 Changing event name from before-parameter-build.cloudsearchdomain.Search to before-parameter-build.cloudsearch-domain.Search
2019-01-11 00:29:55 Changing event name from before-parameter-build.cloudsearchdomain.Search to before-parameter-build.cloudsearch-domain.Search
2019-01-11 00:29:55 Changing event name from docs.*.cloudsearchdomain.Search.complete-section to docs.*.cloudsearch-domain.Search.complete-section
2019-01-11 00:29:55 Changing event name from docs.*.cloudsearchdomain.Search.complete-section to docs.*.cloudsearch-domain.Search.complete-section
2019-01-11 00:29:55 Changing event name from before-parameter-build.logs.CreateExportTask to before-parameter-build.cloudwatch-logs.CreateExportTask
2019-01-11 00:29:55 Changing event name from before-parameter-build.logs.CreateExportTask to before-parameter-build.cloudwatch-logs.CreateExportTask
2019-01-11 00:29:55 Changing event name from docs.*.logs.CreateExportTask.complete-section to docs.*.cloudwatch-logs.CreateExportTask.complete-section
2019-01-11 00:29:55 Changing event name from docs.*.logs.CreateExportTask.complete-section to docs.*.cloudwatch-logs.CreateExportTask.complete-section
2019-01-11 00:29:55 Looking for credentials via: env
2019-01-11 00:29:55 Looking for credentials via: assume-role
2019-01-11 00:29:55 Looking for credentials via: shared-credentials-file
2019-01-11 00:29:55 Found credentials in shared credentials file: ~/.aws/credentials
2019-01-11 00:29:55 Resolving code path. Cwd=/Source/purpleteam-lambda, CodeUri=./selenium-standalone-provisioner
2019-01-11 00:29:55 Resolved absolute path to code is /Source/purpleteam-lambda/selenium-standalone-provisioner
2019-01-11 00:29:55 Code /Source/purpleteam-lambda/selenium-standalone-provisioner is not a zip/jar file
2019-01-11 00:29:55 Trying paths: ['.docker/config.json', '.dockercfg']
2019-01-11 00:29:55 No config file found
2019-01-11 00:29:55 Trying paths: ['.docker/config.json', '.dockercfg']
2019-01-11 00:29:55 No config file found
2019-01-11 00:29:55 http://localhost:None "GET /v1.35/images/lambci/lambda:nodejs8.10/json HTTP/1.1" 200 None
2019-01-11 00:29:55 Looking for auth config
2019-01-11 00:29:55 No auth config in memory - loading from filesystem
2019-01-11 00:29:55 Trying paths: ['.docker/config.json', '.dockercfg']
2019-01-11 00:29:55 No config file found
2019-01-11 00:29:55 Looking for auth entry for 'docker.io'
2019-01-11 00:29:55 No entry found
2019-01-11 00:29:55 No auth config found
2019-01-11 00:29:55 Looking for credentials via: env
2019-01-11 00:29:55 Looking for credentials via: assume-role
2019-01-11 00:29:55 Looking for credentials via: shared-credentials-file
2019-01-11 00:29:55 Found credentials in shared credentials file: ~/.aws/credentials
2019-01-11 00:29:55 Resolving code path. Cwd=/Source/purpleteam-lambda, CodeUri=./app-slave-provisioner
2019-01-11 00:29:55 Resolved absolute path to code is /Source/purpleteam-lambda/app-slave-provisioner
2019-01-11 00:29:55 Code /Source/purpleteam-lambda/app-slave-provisioner is not a zip/jar file
2019-01-11 00:29:55 Trying paths: ['.docker/config.json', '.dockercfg']
2019-01-11 00:29:55 No config file found
2019-01-11 00:29:55 Trying paths: ['.docker/config.json', '.dockercfg']
2019-01-11 00:29:55 No config file found
2019-01-11 00:29:55 http://localhost:None "GET /v1.35/images/lambci/lambda:nodejs8.10/json HTTP/1.1" 200 None
2019-01-11 00:29:55 Looking for auth config
2019-01-11 00:29:55 No auth config in memory - loading from filesystem
2019-01-11 00:29:55 Trying paths: ['.docker/config.json', '.dockercfg']
2019-01-11 00:29:55 No config file found
2019-01-11 00:29:55 Looking for auth entry for 'docker.io'
2019-01-11 00:29:55 No entry found
2019-01-11 00:29:55 No auth config found
2019-01-11 00:29:58 http://localhost:None "POST /v1.35/images/create?tag=nodejs8.10&fromImage=lambci%2Flambda HTTP/1.1" 200 None
Fetching lambci/lambda:nodejs8.10 Docker container image......
2019-01-11 00:29:58 Mounting /Source/purpleteam-lambda/selenium-standalone-provisioner as /var/task:ro inside runtime container
2019-01-11 00:29:58 http://localhost:None "POST /v1.35/images/create?tag=nodejs8.10&fromImage=lambci%2Flambda HTTP/1.1" 200 None
Fetching lambci/lambda:nodejs8.10 Docker container image......
2019-01-11 00:29:58 Mounting /Source/purpleteam-lambda/app-slave-provisioner as /var/task:ro inside runtime container
2019-01-11 00:29:58 http://localhost:None "POST /v1.35/containers/create HTTP/1.1" 201 201
2019-01-11 00:29:58 http://localhost:None "GET /v1.35/containers/156c0040698b3e7be8d314baf595b8f3882711184e9dfb900a05e72662c5990d/json HTTP/1.1" 200 None
2019-01-11 00:29:58 http://localhost:None "GET /v1.35/networks/compose_pt-net HTTP/1.1" 200 None
2019-01-11 00:29:58 http://localhost:None "POST /v1.35/containers/create HTTP/1.1" 201 201
2019-01-11 00:29:58 http://localhost:None "GET /v1.35/containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9/json HTTP/1.1" 200 None
2019-01-11 00:29:58 http://localhost:None "GET /v1.35/networks/compose_pt-net HTTP/1.1" 200 None
2019-01-11 00:29:58 http://localhost:None "POST /v1.35/networks/a4f1162d57b3f5aff37729656ef2a506c27a264cd7665ec0703a4ea9334f988c/connect HTTP/1.1" 200 0
2019-01-11 00:29:58 http://localhost:None "GET /v1.35/containers/156c0040698b3e7be8d314baf595b8f3882711184e9dfb900a05e72662c5990d/json HTTP/1.1" 200 None
2019-01-11 00:29:58 http://localhost:None "POST /v1.35/networks/a4f1162d57b3f5aff37729656ef2a506c27a264cd7665ec0703a4ea9334f988c/connect HTTP/1.1" 200 0
2019-01-11 00:29:58 http://localhost:None "GET /v1.35/containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9/json HTTP/1.1" 200 None
2019-01-11 00:29:59 http://localhost:None "POST /v1.35/containers/156c0040698b3e7be8d314baf595b8f3882711184e9dfb900a05e72662c5990d/start HTTP/1.1" 204 0
2019-01-11 00:29:59 Starting a timer for 3 seconds for function 'provisionSeleniumStandalones'
Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 1071, in run
self.finished.wait(self.interval)
File "/usr/lib/python2.7/threading.py", line 614, in wait
self.__cond.wait(timeout)
File "/usr/lib/python2.7/threading.py", line 349, in wait
endtime = _time() + timeout
TypeError: unsupported operand type(s) for +: 'float' and 'str'
2019-01-11 00:29:59 http://localhost:None "GET /v1.35/containers/156c0040698b3e7be8d314baf595b8f3882711184e9dfb900a05e72662c5990d/json HTTP/1.1" 200 None
2019-01-11 00:29:59 http://localhost:None "POST /containers/156c0040698b3e7be8d314baf595b8f3882711184e9dfb900a05e72662c5990d/attach?stream=1&stdin=0&logs=1&stderr=1&stdout=1 HTTP/1.1" 101 0
START RequestId: 4de742e6-2284-13c9-520f-fb2996acc87e Version: $LATEST
2019-01-11 00:29:59 http://localhost:None "POST /v1.35/containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9/start HTTP/1.1" 204 0
2019-01-11 00:29:59 Starting a timer for 3 seconds for function 'provisionAppSlaves'
Exception in thread Thread-4:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 1071, in run
self.finished.wait(self.interval)
File "/usr/lib/python2.7/threading.py", line 614, in wait
self.__cond.wait(timeout)
File "/usr/lib/python2.7/threading.py", line 349, in wait
endtime = _time() + timeout
TypeError: unsupported operand type(s) for +: 'float' and 'str'
2019-01-11 00:29:59 http://localhost:None "GET /v1.35/containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9/json HTTP/1.1" 200 None
2019-01-11 00:29:59 http://localhost:None "POST /containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9/attach?stream=1&stdin=0&logs=1&stderr=1&stdout=1 HTTP/1.1" 101 0
START RequestId: c591f426-93b2-1683-1de2-db6671d97edc Version: $LATEST
END RequestId: c591f426-93b2-1683-1de2-db6671d97edc
REPORT RequestId: c591f426-93b2-1683-1de2-db6671d97edc Duration: 85.55 ms Billed Duration: 100 ms Memory Size: 128 MB Max Memory Used: 39 MB
2019-01-11 00:30:00 http://localhost:None "GET /v1.35/containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9/json HTTP/1.1" 200 None
2019-01-11 00:30:00 http://localhost:None "DELETE /v1.35/containers/7aa23728a90bcd90a5155579521635a953205a1345f57a00bab825c6fd1ff0a9?force=True&link=False&v=False HTTP/1.1" 204 0
|
TypeError
|
def installation_id(self):
"""
Returns the installation UUID for this AWS SAM CLI installation. If the
installation id has not yet been set, it will be set before returning.
Examples
--------
>>> gc = GlobalConfig()
>>> gc.installation_id
"7b7d4db7-2f54-45ba-bf2f-a2cbc9e74a34"
>>> gc = GlobalConfig()
>>> gc.installation_id
None
Returns
-------
A string containing the installation UUID, or None in case of an error.
"""
if self._installation_id:
return self._installation_id
try:
self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)
return self._installation_id
except (ValueError, IOError, OSError):
return None
|
def installation_id(self):
"""
Returns the installation UUID for this AWS SAM CLI installation. If the
installation id has not yet been set, it will be set before returning.
Examples
--------
>>> gc = GlobalConfig()
>>> gc.installation_id
"7b7d4db7-2f54-45ba-bf2f-a2cbc9e74a34"
>>> gc = GlobalConfig()
>>> gc.installation_id
None
Returns
-------
A string containing the installation UUID, or None in case of an error.
"""
if self._installation_id:
return self._installation_id
try:
self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)
return self._installation_id
except (ValueError, IOError):
return None
|
https://github.com/aws/aws-sam-cli/issues/1313
|
$ docker run -t -d -u 3000:100 --network=host -w /data/hudson/workspace/loper-portal_feature_jenkinstest -v /data/hudson/workspace/loper-portal_feature_jenkinstest:/data/hudson/workspace/loper-portal_feature_jenkinstest:rw,z -v /data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:/data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** xxxx-aws-sam-cli cat
$ docker top 04fa30b17ceb0ae6d45b66190f32f4bad8dedd57386352a31e61f0da6ac18aa2 -eo pid,comm
[Pipeline] {
[Pipeline] withEnv
[Pipeline] {
[Pipeline] withCredentials
Masking supported pattern matches of $AWS_ACCESS_KEY_ID or $AWS_SECRET_ACCESS_KEY
[Pipeline] {
[Pipeline] stage
[Pipeline] { (Validate CloudFormation template)
[Pipeline] sh
+ sam validate --debug -t cloudformation/template.yaml
Traceback (most recent call last):
File "/usr/bin/sam", line 11, in <module>
sys.exit(cli())
File "/usr/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 1063, in invoke
Command.invoke(self, ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/samcli/cli/main.py", line 83, in cli
if global_cfg.telemetry_enabled is None:
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 113, in telemetry_enabled
self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 145, in _get_value
cfg_path = self._get_config_file_path(CONFIG_FILENAME)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 170, in _get_config_file_path
self._create_dir()
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 167, in _create_dir
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1540, in mkdir
_try_except_filenotfounderror(_try_func, _exc_func)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 113, in _try_except_filenotfounderror
try_func()
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1531, in _try_func
self._accessor.mkdir(self, mode)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 585, in wrapped
return strfunc(str(pathobj), *args)
OSError: [Errno 13] Permission denied: '/.aws-sam'
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
|
OSError
|
def _create_dir(self):
"""
Creates configuration directory if it does not already exist, otherwise does nothing.
May raise an OSError if we do not have permissions to create the directory.
"""
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
|
def _create_dir(self):
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
|
https://github.com/aws/aws-sam-cli/issues/1313
|
$ docker run -t -d -u 3000:100 --network=host -w /data/hudson/workspace/loper-portal_feature_jenkinstest -v /data/hudson/workspace/loper-portal_feature_jenkinstest:/data/hudson/workspace/loper-portal_feature_jenkinstest:rw,z -v /data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:/data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** xxxx-aws-sam-cli cat
$ docker top 04fa30b17ceb0ae6d45b66190f32f4bad8dedd57386352a31e61f0da6ac18aa2 -eo pid,comm
[Pipeline] {
[Pipeline] withEnv
[Pipeline] {
[Pipeline] withCredentials
Masking supported pattern matches of $AWS_ACCESS_KEY_ID or $AWS_SECRET_ACCESS_KEY
[Pipeline] {
[Pipeline] stage
[Pipeline] { (Validate CloudFormation template)
[Pipeline] sh
+ sam validate --debug -t cloudformation/template.yaml
Traceback (most recent call last):
File "/usr/bin/sam", line 11, in <module>
sys.exit(cli())
File "/usr/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 1063, in invoke
Command.invoke(self, ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/samcli/cli/main.py", line 83, in cli
if global_cfg.telemetry_enabled is None:
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 113, in telemetry_enabled
self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 145, in _get_value
cfg_path = self._get_config_file_path(CONFIG_FILENAME)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 170, in _get_config_file_path
self._create_dir()
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 167, in _create_dir
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1540, in mkdir
_try_except_filenotfounderror(_try_func, _exc_func)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 113, in _try_except_filenotfounderror
try_func()
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1531, in _try_func
self._accessor.mkdir(self, mode)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 585, in wrapped
return strfunc(str(pathobj), *args)
OSError: [Errno 13] Permission denied: '/.aws-sam'
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
|
OSError
|
def create(self):
"""
Calls Docker API to creates the Docker container instance. Creating the container does *not* run the container.
Use ``start`` method to run the container
:return string: ID of the created container
:raise RuntimeError: If this method is called after a container already has been created
"""
if self.is_created():
raise RuntimeError("This container already exists. Cannot create again.")
LOG.info(
"Mounting %s as %s:ro inside runtime container",
self._host_dir,
self._working_dir,
)
kwargs = {
"command": self._cmd,
"working_dir": self._working_dir,
"volumes": {
self._host_dir: {
# Mount the host directory as "read only" directory inside container at working_dir
# https://docs.docker.com/storage/bind-mounts
# Mount the host directory as "read only" inside container
"bind": self._working_dir,
"mode": "ro",
}
},
# We are not running an interactive shell here.
"tty": False,
}
if self._container_opts:
kwargs.update(self._container_opts)
if self._additional_volumes:
kwargs["volumes"].update(self._additional_volumes)
# Make sure all mounts are of posix path style.
kwargs["volumes"] = {
to_posix_path(host_dir): mount for host_dir, mount in kwargs["volumes"].items()
}
if self._env_vars:
kwargs["environment"] = self._env_vars
if self._exposed_ports:
kwargs["ports"] = self._exposed_ports
if self._entrypoint:
kwargs["entrypoint"] = self._entrypoint
if self._memory_limit_mb:
# Ex: 128m => 128MB
kwargs["mem_limit"] = "{}m".format(self._memory_limit_mb)
if self.network_id == "host":
kwargs["network_mode"] = self.network_id
real_container = self.docker_client.containers.create(self._image, **kwargs)
self.id = real_container.id
if self.network_id and self.network_id != "host":
network = self.docker_client.networks.get(self.network_id)
network.connect(self.id)
return self.id
|
def create(self):
"""
Calls Docker API to creates the Docker container instance. Creating the container does *not* run the container.
Use ``start`` method to run the container
:return string: ID of the created container
:raise RuntimeError: If this method is called after a container already has been created
"""
if self.is_created():
raise RuntimeError("This container already exists. Cannot create again.")
LOG.info(
"Mounting %s as %s:ro inside runtime container",
self._host_dir,
self._working_dir,
)
kwargs = {
"command": self._cmd,
"working_dir": self._working_dir,
"volumes": {
self._host_dir: {
# Mount the host directory as "read only" directory inside container at working_dir
# https://docs.docker.com/storage/bind-mounts
# Mount the host directory as "read only" inside container
"bind": self._working_dir,
"mode": "ro",
}
},
# We are not running an interactive shell here.
"tty": False,
}
if self._container_opts:
kwargs.update(self._container_opts)
if self._additional_volumes:
kwargs["volumes"].update(self._additional_volumes)
# Make sure all mounts are of posix path style.
kwargs["volumes"] = {
to_posix_path(host_dir): mount for host_dir, mount in kwargs["volumes"].items()
}
if self._env_vars:
kwargs["environment"] = self._env_vars
if self._exposed_ports:
kwargs["ports"] = self._exposed_ports
if self._entrypoint:
kwargs["entrypoint"] = self._entrypoint
if self._memory_limit_mb:
# Ex: 128m => 128MB
kwargs["mem_limit"] = "{}m".format(self._memory_limit_mb)
real_container = self.docker_client.containers.create(self._image, **kwargs)
self.id = real_container.id
if self.network_id:
network = self.docker_client.networks.get(self.network_id)
network.connect(self.id)
return self.id
|
https://github.com/aws/aws-sam-cli/issues/669
|
+ sam local start-api --docker-network host --debug
2018-09-17 11:36:54 local start-api command is called
2018-09-17 11:36:54 2 resources found in the template
2018-09-17 11:36:54 Found Serverless function with name='ExampleFunction' and CodeUri='.'
2018-09-17 11:36:54 Trying paths: ['/home/xyz/.docker/config.json', '/home/xyz/.dockercfg']
2018-09-17 11:36:54 Found file at path: /home/xyz/.docker/config.json
2018-09-17 11:36:54 Found 'auths' section
2018-09-17 11:36:54 Found entry (registry=u'https://index.docker.io/v1/', username=u'xyz')
2018-09-17 11:36:54 Found entry (registry=u'https://12345.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:36:54 Found entry (registry=u'6789.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:36:54 Found entry (registry=u'12345.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:36:54 http://localhost:None "GET /v1.35/_ping HTTP/1.1" 200 2
2018-09-17 11:36:54 2 resources found in the template
2018-09-17 11:36:54 Found '1' API Events in Serverless function with name 'ExampleFunction'
2018-09-17 11:36:54 Detected Inline Swagger definition
2018-09-17 11:36:54 Lambda function integration not found in Swagger document at path='/venues' method='get'
2018-09-17 11:36:54 Found '0' APIs in resource 'ServerlessRestApi'
2018-09-17 11:36:54 Removed duplicates from '0' Explicit APIs and '1' Implicit APIs to produce '1' APIs
2018-09-17 11:36:54 1 APIs found in the template
2018-09-17 11:36:54 Trying paths: ['/home/xyz/.docker/config.json', '/home/xyz/.dockercfg']
2018-09-17 11:36:54 Found file at path: /home/xyz/.docker/config.json
2018-09-17 11:36:54 Found 'auths' section
2018-09-17 11:36:54 Found entry (registry=u'https://index.docker.io/v1/', username=u'xyz')
2018-09-17 11:36:54 Found entry (registry=u'https://12345.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:36:54 Found entry (registry=u'6789.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:36:54 Found entry (registry=u'12345.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:36:54 Mounting ExampleFunction at http://127.0.0.1:3000/venues [GET]
2018-09-17 11:36:54 You can now browse to the above endpoints to invoke your functions. You do not need to restart/reload SAM CLI while working on your functions changes will be reflected instantly/automatically. You only need to restart SAM CLI if you update your AWS SAM template
2018-09-17 11:36:54 Localhost server is starting up. Multi-threading = True
2018-09-17 11:36:54 * Running on http://127.0.0.1:3000/ (Press CTRL+C to quit)
2018-09-17 11:37:19 Constructed String representation of Event to invoke Lambda. Event: {"body": null, "httpMethod": "GET", "resource": "/venues", "queryStringParameters": null, "requestContext": {"httpMethod": "GET", "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef", "path": "/venues", "extendedRequestId": null, "resourceId": "123456", "apiId": "1234567890", "stage": "prod", "resourcePath": "/venues", "identity": {"accountId": null, "apiKey": null, "userArn": null, "cognitoAuthenticationProvider": null, "cognitoIdentityPoolId": null, "userAgent": "Custom User Agent String", "caller": null, "cognitoAuthenticationType": null, "sourceIp": "127.0.0.1", "user": null}, "accountId": "123456789012"}, "headers": {"Accept-Encoding": "gzip, deflate", "X-Forwarded-Port": "3000", "Connection": "keep-alive", "Accept": "*/*", "User-Agent": "PostmanRuntime/7.2.0", "Host": "127.0.0.1:3000", "X-Forwarded-Proto": "http", "Cache-Control": "no-cache", "Postman-Token": "82dbfff5-7f82-400c-9cc1-70c19755a98d"}, "stageVariables": null, "path": "/venues", "pathParameters": null, "isBase64Encoded": false}
2018-09-17 11:37:19 Found one Lambda function with name 'ExampleFunction'
2018-09-17 11:37:19 Invoking test-index.run (nodejs6.10)
2018-09-17 11:37:19 Environment variables overrides data is standard format
2018-09-17 11:37:19 Loading AWS credentials from session with profile 'None'
2018-09-17 11:37:19 Changing event name from creating-client-class.iot-data to creating-client-class.iot-data-plane
2018-09-17 11:37:19 Changing event name from before-call.apigateway to before-call.api-gateway
2018-09-17 11:37:19 Changing event name from request-created.machinelearning.Predict to request-created.machine-learning.Predict
2018-09-17 11:37:19 Changing event name from before-parameter-build.autoscaling.CreateLaunchConfiguration to before-parameter-build.auto-scaling.CreateLaunchConfiguration
2018-09-17 11:37:19 Changing event name from before-parameter-build.route53 to before-parameter-build.route-53
2018-09-17 11:37:19 Changing event name from request-created.cloudsearchdomain.Search to request-created.cloudsearch-domain.Search
2018-09-17 11:37:19 Changing event name from docs.*.autoscaling.CreateLaunchConfiguration.complete-section to docs.*.auto-scaling.CreateLaunchConfiguration.complete-section
2018-09-17 11:37:19 Changing event name from before-parameter-build.cloudsearchdomain.Search to before-parameter-build.cloudsearch-domain.Search
2018-09-17 11:37:19 Changing event name from docs.*.cloudsearchdomain.Search.complete-section to docs.*.cloudsearch-domain.Search.complete-section
2018-09-17 11:37:19 Changing event name from before-parameter-build.logs.CreateExportTask to before-parameter-build.cloudwatch-logs.CreateExportTask
2018-09-17 11:37:19 Changing event name from docs.*.logs.CreateExportTask.complete-section to docs.*.cloudwatch-logs.CreateExportTask.complete-section
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable config_file from defaults.
2018-09-17 11:37:19 Loading variable credentials_file from defaults.
2018-09-17 11:37:19 Loading variable data_path from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable credentials_file from defaults.
2018-09-17 11:37:19 Loading variable config_file from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable metadata_service_timeout from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable metadata_service_num_attempts from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Looking for credentials via: env
2018-09-17 11:37:19 Looking for credentials via: assume-role
2018-09-17 11:37:19 Looking for credentials via: shared-credentials-file
2018-09-17 11:37:19 Found credentials in shared credentials file: ~/.aws/credentials
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable region from config file with value 'us-west-2'.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable region from config file with value 'us-west-2'.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable profile from defaults.
2018-09-17 11:37:19 Loading variable region from config file with value 'us-west-2'.
2018-09-17 11:37:19 Resolving code path. Cwd=/home/xyz/workspace/foo/bar/local_build, CodeUri=.
2018-09-17 11:37:19 Resolved absolute path to code is /home/xyz/workspace/foo/bar/local_build
2018-09-17 11:37:19 Code /home/xyz/workspace/foo/bar/local_build is not a zip/jar file
2018-09-17 11:37:19 Trying paths: ['/home/xyz/.docker/config.json', '/home/xyz/.dockercfg']
2018-09-17 11:37:19 Found file at path: /home/xyz/.docker/config.json
2018-09-17 11:37:19 Found 'auths' section
2018-09-17 11:37:19 Found entry (registry=u'https://index.docker.io/v1/', username=u'xyz')
2018-09-17 11:37:19 Found entry (registry=u'https://12345.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:37:19 Found entry (registry=u'6789.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:37:19 Found entry (registry=u'12345.dkr.ecr.eu-west-1.amazonaws.com', username=u'AWS')
2018-09-17 11:37:19 http://localhost:None "GET /v1.35/images/lambci/lambda:nodejs6.10/json HTTP/1.1" 200 None
2018-09-17 11:37:19 Looking for auth config
2018-09-17 11:37:19 Looking for auth entry for 'docker.io'
2018-09-17 11:37:19 Found u'https://index.docker.io/v1/'
2018-09-17 11:37:19 Found auth config
2018-09-17 11:37:21 http://localhost:None "POST /v1.35/images/create?tag=nodejs6.10&fromImage=lambci%2Flambda HTTP/1.1" 200 None
Fetching lambci/lambda:nodejs6.10 Docker container image......
2018-09-17 11:37:21 Mounting /home/xyz/workspace/foo/bar/local_build as /var/task:ro inside runtime container
2018-09-17 11:37:21 http://localhost:None "POST /v1.35/containers/create HTTP/1.1" 201 201
2018-09-17 11:37:21 http://localhost:None "GET /v1.35/containers/3f82b1088218100d717f95b2f4888406a87e7ea18a469677dc43769163f0623c/json HTTP/1.1" 200 None
2018-09-17 11:37:21 http://localhost:None "GET /v1.35/networks/host HTTP/1.1" 200 536
2018-09-17 11:37:21 http://localhost:None "POST /v1.35/networks/bcef0cb45fb4bd0cbabd58f232af039d4fc54af1d41c9a645ea3391cb47398a4/connect HTTP/1.1" 500 94
2018-09-17 11:37:21 http://localhost:None "GET /v1.35/containers/3f82b1088218100d717f95b2f4888406a87e7ea18a469677dc43769163f0623c/json HTTP/1.1" 200 None
2018-09-17 11:37:21 http://localhost:None "DELETE /v1.35/containers/3f82b1088218100d717f95b2f4888406a87e7ea18a469677dc43769163f0623c?force=True&link=False&v=False HTTP/1.1" 204 0
2018-09-17 11:37:21 Exception on /venues [GET]
Traceback (most recent call last):
File "/home/xyz/.local/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/home/xyz/.local/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/xyz/.local/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/xyz/.local/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/home/xyz/.local/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/xyz/.local/lib/python2.7/site-packages/samcli/local/apigw/local_apigw_service.py", line 140, in _request_handler
self.lambda_runner.invoke(route.function_name, event, stdout=stdout_stream, stderr=self.stderr)
File "/home/xyz/.local/lib/python2.7/site-packages/samcli/commands/local/lib/local_lambda.py", line 80, in invoke
self.local_runtime.invoke(config, event, debug_context=self.debug_context, stdout=stdout, stderr=stderr)
File "/home/xyz/.local/lib/python2.7/site-packages/samcli/local/lambdafn/runtime.py", line 79, in invoke
self._container_manager.run(container)
File "/home/xyz/.local/lib/python2.7/site-packages/samcli/local/docker/manager.py", line 61, in run
container.create()
File "/home/xyz/.local/lib/python2.7/site-packages/samcli/local/docker/container.py", line 125, in create
network.connect(self.id)
File "/home/xyz/.local/lib/python2.7/site-packages/docker/models/networks.py", line 57, in connect
container, self.id, *args, **kwargs
File "/home/xyz/.local/lib/python2.7/site-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/home/xyz/.local/lib/python2.7/site-packages/docker/api/network.py", line 248, in connect_container_to_network
self._raise_for_status(res)
File "/home/xyz/.local/lib/python2.7/site-packages/docker/api/client.py", line 231, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/home/xyz/.local/lib/python2.7/site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
APIError: 500 Server Error: Internal Server Error ("container cannot be disconnected from host network or connected to host network")
2018-09-17 11:37:21 127.0.0.1 - - [17/Sep/2018 11:37:21] "GET /venues HTTP/1.1" 502 -
|
APIError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.