after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __call__(self, left, right):
empty_left, empty_right = self._make_data(left), self._make_data(right)
# this `merge` will check whether the combination of those arguments is valid
merged = empty_left.merge(
empty_right,
how=self.how,
on=self.on,
left_on=self.left_on,
right_on=self.right_on,
left_index=self.left_index,
right_index=self.right_index,
sort=self.sort,
suffixes=self.suffixes,
copy=self.copy_,
indicator=self.indicator,
validate=self.validate,
)
# the `index_value` doesn't matter.
index_tokenize_objects = [
left,
right,
self.how,
self.left_on,
self.right_on,
self.left_index,
self.right_index,
]
return self.new_dataframe(
[left, right],
shape=(np.nan, merged.shape[1]),
dtypes=merged.dtypes,
index_value=parse_index(merged.index, *index_tokenize_objects),
columns_value=parse_index(merged.columns, store_data=True),
)
|
def __call__(self, left, right):
empty_left, empty_right = build_empty_df(left.dtypes), build_empty_df(right.dtypes)
# left should have values to keep columns order.
gen_left_data = [np.random.rand(1).astype(dt)[0] for dt in left.dtypes]
empty_left = empty_left.append(
pd.DataFrame([gen_left_data], columns=list(empty_left.columns)).astype(
left.dtypes
)
)
gen_right_data = [np.random.rand(1).astype(dt)[0] for dt in right.dtypes]
empty_right = empty_right.append(
pd.DataFrame([gen_right_data], columns=list(empty_right.columns)).astype(
right.dtypes
)
)
# this `merge` will check whether the combination of those arguments is valid
merged = empty_left.merge(
empty_right,
how=self.how,
on=self.on,
left_on=self.left_on,
right_on=self.right_on,
left_index=self.left_index,
right_index=self.right_index,
sort=self.sort,
suffixes=self.suffixes,
copy=self.copy_,
indicator=self.indicator,
validate=self.validate,
)
# the `index_value` doesn't matter.
index_tokenize_objects = [
left,
right,
self.how,
self.left_on,
self.right_on,
self.left_index,
self.right_index,
]
return self.new_dataframe(
[left, right],
shape=(np.nan, merged.shape[1]),
dtypes=merged.dtypes,
index_value=parse_index(merged.index, *index_tokenize_objects),
columns_value=parse_index(merged.columns, store_data=True),
)
|
https://github.com/mars-project/mars/issues/1110
|
In [4]: df = pd.DataFrame({'a': np.arange(10), 'b': np.random.rand(10)})
In [5]: df2 = df.copy()
In [6]: df2.set_index('a', inplace=True)
In [7]: df2
Out[7]:
b
a
0 0.984265
1 0.544014
2 0.592392
3 0.269762
4 0.236130
5 0.846061
6 0.308780
7 0.604834
8 0.973824
9 0.867099
In [8]: df.merge(df2, on='a') # can work for pandas
Out[8]:
a b_x b_y
0 0 0.984265 0.984265
1 1 0.544014 0.544014
2 2 0.592392 0.592392
3 3 0.269762 0.269762
4 4 0.236130 0.236130
5 5 0.846061 0.846061
6 6 0.308780 0.308780
7 7 0.604834 0.604834
8 8 0.973824 0.973824
9 9 0.867099 0.867099
In [9]: import mars.dataframe as md
In [10]: mdf = md.DataFrame(df)
In [11]: mdf2 = md.DataFrame(df2)
In [12]: mdf.merge(mdf2, on='a') # cannot work for mars dataframe
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-12-bd6a81883d3a> in <module>
----> 1 mdf.merge(mdf2, on='a')
~/Workspace/mars/mars/dataframe/merge/merge.py in merge(df, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, strategy, validate)
350 left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes,
351 copy=copy, indicator=indicator, validate=validate, object_type=ObjectType.dataframe)
--> 352 return op(df, right)
353
354
~/Workspace/mars/mars/dataframe/merge/merge.py in __call__(self, left, right)
174 left_index=self.left_index, right_index=self.right_index,
175 sort=self.sort, suffixes=self.suffixes,
--> 176 copy=self.copy_, indicator=self.indicator, validate=self.validate)
177
178 # the `index_value` doesn't matter.
~/miniconda3/lib/python3.7/site-packages/pandas/core/frame.py in merge(self, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate)
7292 copy=copy,
7293 indicator=indicator,
-> 7294 validate=validate,
7295 )
7296
~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/merge.py in merge(left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate)
84 copy=copy,
85 indicator=indicator,
---> 86 validate=validate,
87 )
88 return op.get_result()
~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/merge.py in __init__(self, left, right, how, on, left_on, right_on, axis, left_index, right_index, sort, suffixes, copy, indicator, validate)
625 self.right_join_keys,
626 self.join_names,
--> 627 ) = self._get_merge_keys()
628
629 # validate the merge keys dtypes. We may need to coerce
~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/merge.py in _get_merge_keys(self)
981 if not is_rkey(rk):
982 if rk is not None:
--> 983 right_keys.append(right._get_label_or_level_values(rk))
984 else:
985 # work-around for merge_asof(right_index=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _get_label_or_level_values(self, key, axis)
1689 values = self.axes[axis].get_level_values(key)._values
1690 else:
-> 1691 raise KeyError(key)
1692
1693 # Check for duplicates
KeyError: 'a'
|
KeyError
|
def build_empty_df(dtypes, index=None):
columns = dtypes.index
df = pd.DataFrame(columns=columns, index=index)
for c, d in zip(columns, dtypes):
df[c] = pd.Series(dtype=d, index=index)
return df
|
def build_empty_df(dtypes, index=None):
columns = dtypes.index
df = pd.DataFrame(columns=columns)
for c, d in zip(columns, dtypes):
df[c] = pd.Series(dtype=d, index=index)
return df
|
https://github.com/mars-project/mars/issues/1110
|
In [4]: df = pd.DataFrame({'a': np.arange(10), 'b': np.random.rand(10)})
In [5]: df2 = df.copy()
In [6]: df2.set_index('a', inplace=True)
In [7]: df2
Out[7]:
b
a
0 0.984265
1 0.544014
2 0.592392
3 0.269762
4 0.236130
5 0.846061
6 0.308780
7 0.604834
8 0.973824
9 0.867099
In [8]: df.merge(df2, on='a') # can work for pandas
Out[8]:
a b_x b_y
0 0 0.984265 0.984265
1 1 0.544014 0.544014
2 2 0.592392 0.592392
3 3 0.269762 0.269762
4 4 0.236130 0.236130
5 5 0.846061 0.846061
6 6 0.308780 0.308780
7 7 0.604834 0.604834
8 8 0.973824 0.973824
9 9 0.867099 0.867099
In [9]: import mars.dataframe as md
In [10]: mdf = md.DataFrame(df)
In [11]: mdf2 = md.DataFrame(df2)
In [12]: mdf.merge(mdf2, on='a') # cannot work for mars dataframe
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-12-bd6a81883d3a> in <module>
----> 1 mdf.merge(mdf2, on='a')
~/Workspace/mars/mars/dataframe/merge/merge.py in merge(df, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, strategy, validate)
350 left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes,
351 copy=copy, indicator=indicator, validate=validate, object_type=ObjectType.dataframe)
--> 352 return op(df, right)
353
354
~/Workspace/mars/mars/dataframe/merge/merge.py in __call__(self, left, right)
174 left_index=self.left_index, right_index=self.right_index,
175 sort=self.sort, suffixes=self.suffixes,
--> 176 copy=self.copy_, indicator=self.indicator, validate=self.validate)
177
178 # the `index_value` doesn't matter.
~/miniconda3/lib/python3.7/site-packages/pandas/core/frame.py in merge(self, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate)
7292 copy=copy,
7293 indicator=indicator,
-> 7294 validate=validate,
7295 )
7296
~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/merge.py in merge(left, right, how, on, left_on, right_on, left_index, right_index, sort, suffixes, copy, indicator, validate)
84 copy=copy,
85 indicator=indicator,
---> 86 validate=validate,
87 )
88 return op.get_result()
~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/merge.py in __init__(self, left, right, how, on, left_on, right_on, axis, left_index, right_index, sort, suffixes, copy, indicator, validate)
625 self.right_join_keys,
626 self.join_names,
--> 627 ) = self._get_merge_keys()
628
629 # validate the merge keys dtypes. We may need to coerce
~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/merge.py in _get_merge_keys(self)
981 if not is_rkey(rk):
982 if rk is not None:
--> 983 right_keys.append(right._get_label_or_level_values(rk))
984 else:
985 # work-around for merge_asof(right_index=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _get_label_or_level_values(self, key, axis)
1689 values = self.axes[axis].get_level_values(key)._values
1690 else:
-> 1691 raise KeyError(key)
1692
1693 # Check for duplicates
KeyError: 'a'
|
KeyError
|
def __init__(self, input_=None, index=None, dtypes=None, gpu=None, sparse=None, **kw):
super().__init__(
_input=input_,
_index=index,
_dtypes=dtypes,
_gpu=gpu,
_sparse=sparse,
_object_type=ObjectType.dataframe,
**kw,
)
|
def __init__(
self, index=None, dtypes=None, from_1d_tensors=None, gpu=None, sparse=None, **kw
):
super().__init__(
_index=index,
_dtypes=dtypes,
_from_1d_tensors=from_1d_tensors,
_gpu=gpu,
_sparse=sparse,
_object_type=ObjectType.dataframe,
**kw,
)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
if self._input is not None:
if not isinstance(self._input, dict):
self._input = next(inputs_iter)
else:
# check each value for input
new_input = OrderedDict()
for k, v in self._input.items():
if isinstance(v, (Base, Entity)):
new_input[k] = next(inputs_iter)
else:
new_input[k] = v
self._input = new_input
if self._index is not None:
self._index = next(inputs_iter)
|
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
if not self._from_1d_tensors:
self._input = inputs[0]
if self._index is not None:
self._index = inputs[-1]
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def __call__(self, input_tensor, index, columns):
if isinstance(input_tensor, dict):
return self._call_input_1d_tileables(input_tensor, index, columns)
else:
return self._call_input_tensor(input_tensor, index, columns)
|
def __call__(self, input_tensor, index, columns):
if self._from_1d_tensors:
return self._call_input_1d_tensors(input_tensor, index, columns)
else:
return self._call_input_tensor(input_tensor, index, columns)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def tile(cls, op):
# make sure all tensor have known chunk shapes
check_chunks_unknown_shape(op.inputs, TilesError)
if isinstance(op.input, dict):
return cls._tile_input_1d_tileables(op)
else:
return cls._tile_input_tensor(op)
|
def tile(cls, op):
# make sure all tensor have known chunk shapes
check_chunks_unknown_shape(op.inputs, TilesError)
if op.from_1d_tensors:
return cls._tile_input_1d_tensors(op)
else:
return cls._tile_input_tensor(op)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def execute(cls, ctx, op):
chunk = op.outputs[0]
if isinstance(op.input, dict):
d = OrderedDict()
for k, v in op.input.items():
if hasattr(v, "key"):
d[k] = ctx[v.key]
else:
d[k] = v
if op.index is not None:
index_data = ctx[op.index.key]
else:
index_data = chunk.index_value.to_pandas()
ctx[chunk.key] = pd.DataFrame(
d, index=index_data, columns=chunk.columns_value.to_pandas()
)
else:
tensor_data = ctx[op.inputs[0].key]
if op.index is not None:
# index is a tensor
index_data = ctx[op.inputs[1].key]
else:
index_data = chunk.index_value.to_pandas()
ctx[chunk.key] = pd.DataFrame(
tensor_data, index=index_data, columns=chunk.columns_value.to_pandas()
)
|
def execute(cls, ctx, op):
chunk = op.outputs[0]
if op.from_1d_tensors:
d = OrderedDict()
tensors = [ctx[inp.key] for inp in op.inputs]
if op.index is not None:
tensors_data, index_data = tensors[:-1], tensors[-1]
else:
tensors_data = tensors
index_data = chunk.index_value.to_pandas()
for name, data_1d in zip(chunk.dtypes.index, tensors_data):
d[name] = data_1d
ctx[chunk.key] = pd.DataFrame(
d, index=index_data, columns=chunk.columns_value.to_pandas()
)
else:
tensor_data = ctx[op.inputs[0].key]
if op.index is not None:
# index is a tensor
index_data = ctx[op.inputs[1].key]
else:
index_data = chunk.index_value.to_pandas()
ctx[chunk.key] = pd.DataFrame(
tensor_data, index=index_data, columns=chunk.columns_value.to_pandas()
)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def dataframe_from_tensor(tensor, index=None, columns=None, gpu=None, sparse=False):
if tensor.ndim > 2 or tensor.ndim <= 0:
raise TypeError(
"Not support create DataFrame from {0} dims tensor", format(tensor.ndim)
)
try:
col_num = tensor.shape[1]
except IndexError:
col_num = 1
gpu = tensor.op.gpu if gpu is None else gpu
op = DataFrameFromTensor(
input_=tensor,
dtypes=pd.Series([tensor.dtype] * col_num, index=columns),
gpu=gpu,
sparse=sparse,
)
return op(tensor, index, columns)
|
def dataframe_from_tensor(tensor, index=None, columns=None, gpu=None, sparse=False):
if tensor.ndim > 2 or tensor.ndim <= 0:
raise TypeError(
"Not support create DataFrame from {0} dims tensor", format(tensor.ndim)
)
try:
col_num = tensor.shape[1]
except IndexError:
col_num = 1
gpu = tensor.op.gpu if gpu is None else gpu
op = DataFrameFromTensor(
dtypes=pd.Series([tensor.dtype] * col_num, index=columns),
gpu=gpu,
sparse=sparse,
)
return op(tensor, index, columns)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
chunk_size=None,
gpu=None,
sparse=None,
):
if isinstance(data, TENSOR_TYPE):
if chunk_size is not None:
data = data.rechunk(chunk_size)
df = dataframe_from_tensor(
data, index=index, columns=columns, gpu=gpu, sparse=sparse
)
elif isinstance(data, DATAFRAME_TYPE):
if not hasattr(data, "data"):
# DataFrameData
df = _Frame(data)
else:
df = data
elif isinstance(data, dict) and any(
isinstance(v, (Base, Entity)) for v in data.values()
):
# data is a dict and some value is tensor
df = dataframe_from_1d_tileables(
data, index=index, columns=columns, gpu=gpu, sparse=sparse
)
else:
pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
df = from_pandas_df(pdf, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
super().__init__(df.data)
|
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
chunk_size=None,
gpu=None,
sparse=None,
):
if isinstance(data, TENSOR_TYPE):
if chunk_size is not None:
data = data.rechunk(chunk_size)
df = dataframe_from_tensor(
data, index=index, columns=columns, gpu=gpu, sparse=sparse
)
elif isinstance(data, DATAFRAME_TYPE):
if not hasattr(data, "data"):
# DataFrameData
df = _Frame(data)
else:
df = data
elif isinstance(data, dict) and any(
isinstance(v, (Base, Entity)) for v in data.values()
):
# data is a dict and some value is tensor
columns = list(data.keys()) if columns is None else columns
tensors = []
for c in columns:
tensors.append(astensor(data[c]))
df = dataframe_from_1d_tensors(
tensors, index=index, columns=columns, gpu=gpu, sparse=sparse
)
else:
pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
df = from_pandas_df(pdf, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
super().__init__(df.data)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def _submit_operand_to_execute(self):
self._semaphore.acquire()
self._queue.wait()
if self._has_error.is_set():
# error happens, ignore
return
with self._lock:
to_submit_op = self._queue.pop(0)
assert to_submit_op.key not in self._submitted_op_keys
self._submitted_op_keys.add(to_submit_op.key)
if self._print_progress:
i, n = len(self._submitted_op_keys), len(self._op_key_to_ops)
if i % 30 or i >= n:
logger.info(
"[{0}] {1:.2f}% percent of graph has been submitted".format(
str(datetime.datetime.now()), float(i) * 100 / n
)
)
if self._prefetch:
# check the operand's outputs if any of its successor's predecessors can be prefetched
self._prefetch_executor.submit(self._fetch_chunks, to_submit_op.outputs)
# execute the operand and return future
return self._operand_executor.submit(self._execute_operand, to_submit_op)
|
def _submit_operand_to_execute(self):
self._semaphore.acquire()
self._queue.wait()
if self._has_error.is_set():
# error happens, ignore
return
to_submit_op = self._queue.pop(0)
assert to_submit_op.key not in self._submitted_op_keys
self._submitted_op_keys.add(to_submit_op.key)
if self._print_progress:
i, n = len(self._submitted_op_keys), len(self._op_key_to_ops)
if i % 30 or i >= n:
logger.info(
"[{0}] {1:.2f}% percent of graph has been submitted".format(
str(datetime.datetime.now()), float(i) * 100 / n
)
)
if self._prefetch:
# check the operand's outputs if any of its successor's predecessors can be prefetched
self._prefetch_executor.submit(self._fetch_chunks, to_submit_op.outputs)
# execute the operand and return future
return self._operand_executor.submit(self._execute_operand, to_submit_op)
|
https://github.com/mars-project/mars/issues/1097
|
In [7]: md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-7-ec6db392e00f> in <module>
----> 1 md.DataFrame({'a': '1', 'b': md.Series([1, 2, 3])}).execute()
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
46 df = dataframe_from_1d_tensors(
47 tensors, index=index, columns=columns,
---> 48 gpu=gpu, sparse=sparse)
49 else:
50 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in dataframe_from_1d_tensors(tensors, index, columns, gpu, sparse)
328 op = DataFrameFromTensor(dtypes=dtypes, from_1d_tensors=True,
329 gpu=gpu, sparse=sparse)
--> 330 return op(tensors, index, columns)
331
332
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
73 def __call__(self, input_tensor, index, columns):
74 if self._from_1d_tensors:
---> 75 return self._call_input_1d_tensors(input_tensor, index, columns)
76 else:
77 return self._call_input_tensor(input_tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in _call_input_1d_tensors(self, input_1d_tensors, index, columns)
99 def _call_input_1d_tensors(self, input_1d_tensors, index, columns):
100 if len({t.shape for t in input_1d_tensors}) != 1:
--> 101 raise ValueError('input 1-d tensors should have same shape')
102
103 inputs = list(input_1d_tensors)
ValueError: input 1-d tensors should have same shape
|
ValueError
|
def validate_axis(axis, tileable=None):
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
illegal = False
try:
axis = operator.index(axis)
if axis < 0 or (tileable is not None and axis >= tileable.ndim):
illegal = True
except TypeError:
illegal = True
if illegal:
raise ValueError(
"No axis named {} for object type {}".format(axis, type(tileable))
)
return axis
|
def validate_axis(axis, tileable=None):
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
illegal = False
try:
axis = operator.index(axis)
if axis < 0 or (tileable and axis >= tileable.ndim):
illegal = True
except TypeError:
illegal = True
if illegal:
raise ValueError(
"No axis named {} for object type {}".format(axis, type(tileable))
)
return axis
|
https://github.com/mars-project/mars/issues/1090
|
import mars.dataframe as md
df = md.read_csv('/home/xuye.qin/ml-20m/ratings.csv')
df.sort_values(by='rating')
Traceback (most recent call last):
File "/home/xuye.qin/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-d04621974535>", line 1, in <module>
df.sort_values(by='rating')
File "/tmp/pycharm_project_852/mars/dataframe/sort/sort_values.py", line 181, in dataframe_sort_values
axis = validate_axis(axis, df)
File "/tmp/pycharm_project_852/mars/dataframe/utils.py", line 623, in validate_axis
'object type {}'.format(axis, type(tileable)))
ValueError: No axis named 0 for object type <class 'mars.dataframe.core.DataFrame'>
|
ValueError
|
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
a = xp.concatenate(inputs, axis=op.axis)
p = len(inputs)
assert a.shape[op.axis] == p**2
if op.kind is not None:
# sort
_sort(a, op, xp, inplace=True)
else:
# prepare for sampling via `partition`
kth = xp.arange(p - 1, (p - 1) ** 2 + 1, p - 1)
a.partition(kth, axis=op.axis)
select = slice(p, p**2 + 1, p)
slc = (slice(None),) * op.axis + (select,)
ctx[op.outputs[0].key] = result = a[slc]
assert result.shape[op.axis] == p - 1
|
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
a = xp.concatenate(inputs, axis=op.axis)
p = len(inputs)
assert a.shape[op.axis] == p**2
if op.kind is not None:
# sort
_sort(a, op, xp, inplace=True)
else:
# prepare for sampling via `partition`
kth = xp.arange(p - 1, (p - 1) ** 2 + 1, p - 1)
a.partition(kth, axis=op.axis)
select = slice(p - 1, (p - 1) ** 2 + 1, p - 1)
slc = (slice(None),) * op.axis + (select,)
ctx[op.outputs[0].key] = a[slc]
|
https://github.com/mars-project/mars/issues/1090
|
import mars.dataframe as md
df = md.read_csv('/home/xuye.qin/ml-20m/ratings.csv')
df.sort_values(by='rating')
Traceback (most recent call last):
File "/home/xuye.qin/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3296, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-d04621974535>", line 1, in <module>
df.sort_values(by='rating')
File "/tmp/pycharm_project_852/mars/dataframe/sort/sort_values.py", line 181, in dataframe_sort_values
axis = validate_axis(axis, df)
File "/tmp/pycharm_project_852/mars/dataframe/utils.py", line 623, in validate_axis
'object type {}'.format(axis, type(tileable)))
ValueError: No axis named 0 for object type <class 'mars.dataframe.core.DataFrame'>
|
ValueError
|
def tile_with_mask(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
out_chunks = []
if isinstance(op.mask, SERIES_TYPE):
mask = op.inputs[1]
nsplits, out_shape, df_chunks, mask_chunks = align_dataframe_series(
in_df, mask, axis="index"
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for idx, df_chunk in zip(out_chunk_indexes, df_chunks):
mask_chunk = mask_chunks[df_chunk.index[0]]
out_chunk = (
op.copy()
.reset_key()
.new_chunk(
[df_chunk, mask_chunk],
shape=(np.nan, df_chunk.shape[1]),
index=idx,
index_value=df_chunk.index_value,
columns_value=df_chunk.columns_value,
dtypes=df_chunk.dtypes,
)
)
out_chunks.append(out_chunk)
else:
check_chunks_unknown_shape([in_df], TilesError)
nsplits_acc = np.cumsum((0,) + in_df.nsplits[0])
for idx in range(in_df.chunk_shape[0]):
for idxj in range(in_df.chunk_shape[1]):
in_chunk = in_df.cix[idx, idxj]
chunk_op = op.copy().reset_key()
chunk_op._mask = op.mask.iloc[nsplits_acc[idx] : nsplits_acc[idx + 1]]
out_chunk = chunk_op.new_chunk(
[in_chunk],
index=in_chunk.index,
shape=(np.nan, in_chunk.shape[1]),
dtypes=in_chunk.dtypes,
index_value=in_df.index_value,
columns_value=in_chunk.columns_value,
)
out_chunks.append(out_chunk)
nsplits = ((np.nan,) * in_df.chunk_shape[0], in_df.nsplits[1])
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=nsplits,
)
|
def tile_with_mask(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
out_chunks = []
if isinstance(op.mask, SERIES_TYPE):
mask = op.inputs[1]
nsplits, out_shape, df_chunks, mask_chunks = align_dataframe_series(
in_df, mask, axis="index"
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for idx, df_chunk in zip(out_chunk_indexes, df_chunks):
mask_chunk = mask_chunks[df_chunk.index[0]]
out_chunk = (
op.copy()
.reset_key()
.new_chunk(
[df_chunk, mask_chunk],
shape=(np.nan, df_chunk.shape[1]),
index=idx,
index_value=df_chunk.index_value,
columns_value=df_chunk.columns_value,
)
)
out_chunks.append(out_chunk)
else:
check_chunks_unknown_shape([in_df], TilesError)
nsplits_acc = np.cumsum((0,) + in_df.nsplits[0])
for idx in range(in_df.chunk_shape[0]):
for idxj in range(in_df.chunk_shape[1]):
in_chunk = in_df.cix[idx, idxj]
chunk_op = op.copy().reset_key()
chunk_op._mask = op.mask.iloc[nsplits_acc[idx] : nsplits_acc[idx + 1]]
out_chunk = chunk_op.new_chunk(
[in_chunk],
index=in_chunk.index,
shape=(np.nan, in_chunk.shape[1]),
dtypes=in_chunk.dtypes,
index_value=in_df.index_value,
columns_value=in_chunk.columns_value,
)
out_chunks.append(out_chunk)
nsplits = ((np.nan,) * in_df.chunk_shape[0], in_df.nsplits[1])
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=nsplits,
)
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def tile(cls, op):
df = op.outputs[0]
left = build_concatenated_rows_frame(op.inputs[0])
right = build_concatenated_rows_frame(op.inputs[1])
if len(left.chunks) == 1 or len(right.chunks) == 1:
return cls._tile_one_chunk(op, left, right)
left_row_chunk_size = left.chunk_shape[0]
right_row_chunk_size = right.chunk_shape[0]
out_row_chunk_size = max(left_row_chunk_size, right_row_chunk_size)
out_chunk_shape = (out_row_chunk_size, 1)
nsplits = [[np.nan for _ in range(out_row_chunk_size)], [df.shape[1]]]
left_on = _prepare_shuffle_on(op.left_index, op.left_on, op.on)
right_on = _prepare_shuffle_on(op.right_index, op.right_on, op.on)
# do shuffle
left_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, left_on, left)
right_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, right_on, right)
out_chunks = []
for left_chunk, right_chunk in zip(left_chunks, right_chunks):
merge_op = op.copy().reset_key()
out_chunk = merge_op.new_chunk(
[left_chunk, right_chunk],
shape=(np.nan, df.shape[1]),
index=left_chunk.index,
index_value=infer_index_value(
left_chunk.index_value, right_chunk.index_value
),
columns_value=df.columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def tile(cls, op):
df = op.outputs[0]
left = build_concatenated_rows_frame(op.inputs[0])
right = build_concatenated_rows_frame(op.inputs[1])
# left and right now are guaranteed only chunked along index axis, not column axis.
if left.chunk_shape[1] > 1:
check_chunks_unknown_shape([left], TilesError)
left = left.rechunk({1: left.shape[1]})._inplace_tile()
if right.chunk_shape[1] > 1:
check_chunks_unknown_shape([right], TilesError)
right = right.rechunk({1: right.shape[1]})._inplace_tile()
if len(left.chunks) == 1 or len(right.chunks) == 1:
return cls._tile_one_chunk(op, left, right)
left_row_chunk_size = left.chunk_shape[0]
right_row_chunk_size = right.chunk_shape[0]
out_row_chunk_size = max(left_row_chunk_size, right_row_chunk_size)
out_chunk_shape = (out_row_chunk_size, 1)
nsplits = [[np.nan for _ in range(out_row_chunk_size)], [df.shape[1]]]
left_on = _prepare_shuffle_on(op.left_index, op.left_on, op.on)
right_on = _prepare_shuffle_on(op.right_index, op.right_on, op.on)
# do shuffle
left_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, left_on, left)
right_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, right_on, right)
out_chunks = []
for left_chunk, right_chunk in zip(left_chunks, right_chunks):
merge_op = op.copy().reset_key()
out_chunk = merge_op.new_chunk(
[left_chunk, right_chunk],
shape=(np.nan, df.shape[1]),
index=left_chunk.index,
index_value=infer_index_value(
left_chunk.index_value, right_chunk.index_value
),
columns_value=df.columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def set_operand_state(self, op_key, state):
if (
op_key not in self._operand_infos
and self._chunk_graph_builder.iterative_chunk_graphs
and state == OperandState.FREED
):
# if iterative tiling is entered,
# the `_operand_infos` will be a completely new one,
# in this case, we don't actually care about if the op is freed
return
if op_key not in self._operand_infos and self.state in GraphState.TERMINATED_STATES:
# if operand has been cleared in iterative tiling and execute again in another
# graph, just ignore it.
return
op_info = self._operand_infos[op_key]
op_info["state"] = state
self._graph_meta_ref.update_op_state(
op_key, op_info["op_name"], state, _tell=True, _wait=False
)
try:
del op_info["failover_state"]
except KeyError:
pass
|
def set_operand_state(self, op_key, state):
if (
op_key not in self._operand_infos
and self._chunk_graph_builder.iterative_chunk_graphs
and state == OperandState.FREED
):
# if iterative tiling is entered,
# the `_operand_infos` will be a completely new one,
# in this case, we don't actually care about if the op is freed
return
op_info = self._operand_infos[op_key]
op_info["state"] = state
self._graph_meta_ref.update_op_state(
op_key, op_info["op_name"], state, _tell=True, _wait=False
)
try:
del op_info["failover_state"]
except KeyError:
pass
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def set_operand_worker(self, op_key, worker):
if op_key not in self._operand_infos and self.state in GraphState.TERMINATED_STATES:
# if operand has been cleared in iterative tiling and execute again in another
# graph, just ignore it.
return
op_info = self._operand_infos[op_key]
if worker:
op_info["worker"] = worker
else:
try:
del op_info["worker"]
except KeyError:
pass
self._graph_meta_ref.update_op_worker(
op_key, op_info["op_name"], worker, _tell=True, _wait=False
)
|
def set_operand_worker(self, op_key, worker):
op_info = self._operand_infos[op_key]
if worker:
op_info["worker"] = worker
else:
try:
del op_info["worker"]
except KeyError:
pass
self._graph_meta_ref.update_op_worker(
op_key, op_info["op_name"], worker, _tell=True, _wait=False
)
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def append_graph(self, graph_key, op_info):
from ..graph import GraphActor
if not self._is_terminal:
self._is_terminal = op_info.get("is_terminal")
graph_ref = self.get_actor_ref(GraphActor.gen_uid(self._session_id, graph_key))
self._graph_refs.append(graph_ref)
self._pred_keys.update(op_info["io_meta"]["predecessors"])
self._succ_keys.update(op_info["io_meta"]["successors"])
if (
self._state not in OperandState.STORED_STATES
and self._state != OperandState.RUNNING
):
self._state = op_info["state"]
if self._state not in OperandState.TERMINATED_STATES:
self.start_operand()
|
def append_graph(self, graph_key, op_info):
from ..graph import GraphActor
if not self._is_terminal:
self._is_terminal = op_info.get("is_terminal")
graph_ref = self.get_actor_ref(GraphActor.gen_uid(self._session_id, graph_key))
self._graph_refs.append(graph_ref)
self._pred_keys.update(op_info["io_meta"]["predecessors"])
self._succ_keys.update(op_info["io_meta"]["successors"])
if (
self._state not in OperandState.STORED_STATES
and self._state != OperandState.RUNNING
):
self._state = op_info["state"]
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def submit_to_worker(self, worker, data_metas):
# worker assigned, submit job
if self.state in (OperandState.CANCELLED, OperandState.CANCELLING):
self.start_operand()
return
if self.state == OperandState.RUNNING:
# already running
return
self.worker = worker
target_predicts = self._get_target_predicts(worker)
try:
input_metas = self._io_meta["input_data_metas"]
input_chunks = [k[0] if isinstance(k, tuple) else k for k in input_metas]
except KeyError:
input_chunks = self._input_chunks
# submit job
if set(input_chunks) != set(self._input_chunks) or self._executable_dag is None:
exec_graph = self._graph_refs[-1].get_executable_operand_dag(
self._op_key, input_chunks
)
else:
exec_graph = self._executable_dag
self._execution_ref = self._get_execution_ref()
try:
with rewrite_worker_errors():
self._submit_promise = self._execution_ref.execute_graph(
self._session_id,
self._op_key,
exec_graph,
self._io_meta,
data_metas,
calc_device=self._calc_device,
send_addresses=target_predicts,
_promise=True,
_spawn=False,
)
except WorkerDead:
logger.debug(
"Worker %s dead when submitting operand %s into queue", worker, self._op_key
)
self._resource_ref.detach_dead_workers([worker], _tell=True)
else:
self.start_operand(OperandState.RUNNING)
|
def submit_to_worker(self, worker, data_metas):
# worker assigned, submit job
if self.state in (OperandState.CANCELLED, OperandState.CANCELLING):
self.start_operand()
return
if self.state == OperandState.RUNNING:
# already running
return
self.worker = worker
target_predicts = self._get_target_predicts(worker)
try:
input_metas = self._io_meta["input_data_metas"]
input_chunks = [k[0] if isinstance(k, tuple) else k for k in input_metas]
except KeyError:
input_chunks = self._input_chunks
# submit job
if set(input_chunks) != set(self._input_chunks) or self._executable_dag is None:
exec_graph = self._graph_refs[0].get_executable_operand_dag(
self._op_key, input_chunks
)
else:
exec_graph = self._executable_dag
self._execution_ref = self._get_execution_ref()
try:
with rewrite_worker_errors():
self._submit_promise = self._execution_ref.execute_graph(
self._session_id,
self._op_key,
exec_graph,
self._io_meta,
data_metas,
calc_device=self._calc_device,
send_addresses=target_predicts,
_promise=True,
_spawn=False,
)
except WorkerDead:
logger.debug(
"Worker %s dead when submitting operand %s into queue", worker, self._op_key
)
self._resource_ref.detach_dead_workers([worker], _tell=True)
else:
self.start_operand(OperandState.RUNNING)
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def _add_finished_terminal(self, final_state=None, exc=None):
futures = []
for graph_ref in self._graph_refs:
if graph_ref.reload_state() in (GraphState.RUNNING, GraphState.CANCELLING):
futures.append(
graph_ref.add_finished_terminal(
self._op_key,
final_state=final_state,
exc=exc,
_tell=True,
_wait=False,
)
)
return futures
|
def _add_finished_terminal(self, final_state=None, exc=None):
futures = []
for graph_ref in self._graph_refs:
futures.append(
graph_ref.add_finished_terminal(
self._op_key, final_state=final_state, exc=exc, _tell=True, _wait=False
)
)
return futures
|
https://github.com/mars-project/mars/issues/1055
|
Traceback (most recent call last):
File "mars/serialize/pbserializer.pyx", line 527, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
field_obj = getattr(obj, tag)
AttributeError: index_value
|
AttributeError
|
def tile(cls, op):
from ..datasource import arange
in_tensor = astensor(op.input)
flattened = in_tensor.astype(bool).flatten()
recursive_tile(flattened)
indices = arange(flattened.size, dtype=np.intp, chunk_size=flattened.nsplits)
indices = indices[flattened]
dim_indices = unravel_index(indices, in_tensor.shape)
[recursive_tile(ind) for ind in dim_indices]
kws = [
{"nsplits": ind.nsplits, "chunks": ind.chunks, "shape": o.shape}
for ind, o in zip(dim_indices, op.outputs)
]
new_op = op.copy()
return new_op.new_tensors(op.inputs, kws=kws, output_limit=len(kws))
|
def tile(cls, op):
from ..datasource import arange
in_tensor = op.input
flattened = in_tensor.astype(bool).flatten()
recursive_tile(flattened)
indices = arange(flattened.size, dtype=np.intp, chunk_size=flattened.nsplits)
indices = indices[flattened]
dim_indices = unravel_index(indices, in_tensor.shape)
[recursive_tile(ind) for ind in dim_indices]
kws = [
{"nsplits": ind.nsplits, "chunks": ind.chunks, "shape": o.shape}
for ind, o in zip(dim_indices, op.outputs)
]
new_op = op.copy()
return new_op.new_tensors(op.inputs, kws=kws, output_limit=len(kws))
|
https://github.com/mars-project/mars/issues/953
|
runfile('C:/Users/Lenovo/Desktop/test/mars/test.py', wdir='C:/Users/Lenovo/Desktop/test/mars')
Traceback (most recent call last):
File "C:\Users\Lenovo\Desktop\test\mars\test.py", line 25, in <module>
sess.run(mt.where( x > 5 ))
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\session.py", line 183, in run
result = self._sess.run(*tileables, **kw)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\session.py", line 90, in run
res = self._executor.execute_tileables(tileables, **kw)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 480, in inner
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\executor.py", line 745, in execute_tileables
tileables, tileable_graph=tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 480, in inner
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 342, in build
tileables, tileable_graph=tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 480, in inner
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 256, in build
self._on_tile_failure(tileable_data.op, exc_info)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 294, in inner
six.reraise(*exc_info)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\lib\six.py", line 703, in reraise
raise value
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 236, in build
tiled = self._tile(tileable_data, tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 330, in _tile
return super(IterativeChunkGraphBuilder, self)._tile(tileable_data, tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 194, in _tile
tds[0]._inplace_tile()
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\core.py", line 162, in _inplace_tile
return handler.inplace_tile(self)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 128, in inplace_tile
dispatched = self.dispatch(to_tile.op)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 116, in dispatch
return op_cls.tile(op)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tensor\indexing\nonzero.py", line 52, in tile
flattened = in_tensor.astype(bool).flatten()
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tensor\base\astype.py", line 146, in _astype
return tensor if not copy else tensor.copy(order=order)
TypeError: copy() got an unexpected keyword argument 'order'
|
TypeError
|
def execute(cls, ctx, op):
xdf = cudf if op.gpu else pd
out_df = op.outputs[0]
csv_kwargs = op.extra_params.copy()
with open_file(
op.path, compression=op.compression, storage_options=op.storage_options
) as f:
if op.compression is not None:
# As we specify names and dtype, we need to skip header rows
csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header
df = xdf.read_csv(
BytesIO(f.read()),
sep=op.sep,
names=op.names,
index_col=op.index_col,
dtype=cls._validate_dtypes(op.outputs[0].dtypes, op.gpu),
**csv_kwargs,
)
else:
df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op)
ctx[out_df.key] = df
|
def execute(cls, ctx, op):
xdf = cudf if op.gpu else pd
out_df = op.outputs[0]
csv_kwargs = op.extra_params.copy()
with open_file(
op.path, compression=op.compression, storage_options=op.storage_options
) as f:
if op.compression is not None:
# As we specify names and dtype, we need to skip header rows
csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header
df = xdf.read_csv(
BytesIO(f.read()),
sep=op.sep,
names=op.names,
index_col=op.index_col,
dtype=out_df.dtypes.to_dict(),
**csv_kwargs,
)
else:
start, end = _find_chunk_start_end(f, op.offset, op.size)
f.seek(start)
b = BytesIO(f.read(end - start))
if end == start:
# the last chunk may be empty
df = build_empty_df(out_df.dtypes)
else:
if start == 0:
# The first chunk contains header
# As we specify names and dtype, we need to skip header rows
csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header
df = xdf.read_csv(
b,
sep=op.sep,
names=op.names,
index_col=op.index_col,
dtype=out_df.dtypes.to_dict(),
**csv_kwargs,
)
ctx[out_df.key] = df
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def __init__(
self, func=None, by=None, as_index=None, sort=None, method=None, stage=None, **kw
):
super(DataFrameGroupByAgg, self).__init__(
_func=func,
_by=by,
_as_index=as_index,
_sort=sort,
_method=method,
_stage=stage,
_object_type=ObjectType.dataframe,
**kw,
)
|
def __init__(self, func=None, by=None, as_index=None, method=None, stage=None, **kw):
super(DataFrameGroupByAgg, self).__init__(
_func=func,
_by=by,
_as_index=as_index,
_method=method,
_stage=stage,
_object_type=ObjectType.dataframe,
**kw,
)
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def _execute_map(cls, df, op):
if isinstance(op.func, (six.string_types, dict)):
return df.groupby(op.by, as_index=op.as_index, sort=False).agg(op.func)
else:
raise NotImplementedError
|
def _execute_map(cls, df, op):
if isinstance(op.func, (six.string_types, dict)):
return df.groupby(op.by, as_index=op.as_index).agg(op.func)
else:
raise NotImplementedError
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def _execute_combine(cls, df, op):
if isinstance(op.func, (six.string_types, dict)):
return df.groupby(level=0, as_index=op.as_index, sort=op.sort).agg(op.func)
else:
raise NotImplementedError
|
def _execute_combine(cls, df, op):
if isinstance(op.func, (six.string_types, dict)):
return df.groupby(op.by, as_index=op.as_index).agg(op.func)
else:
raise NotImplementedError
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def agg(groupby, func, method="tree"):
"""
Aggregate using one or more operations on grouped data.
:param groupby: Groupby data.
:param func: Aggregation functions.
:param method: 'shuffle' or 'tree', 'tree' method provide a better performance, 'shuffle' is recommended
if aggregated result is very large.
:return: Aggregated result.
"""
# When perform a computation on the grouped data, we won't shuffle
# the data in the stage of groupby and do shuffle after aggregation.
if not isinstance(groupby, GROUPBY_TYPE):
raise TypeError("Input should be type of groupby, not %s" % type(groupby))
elif isinstance(func, list):
raise NotImplementedError("Function list is not supported now.")
if method not in ["shuffle", "tree"]:
raise NotImplementedError("Method %s has not been implemented" % method)
if isinstance(func, six.string_types):
funcs = [func]
elif isinstance(func, dict):
funcs = func.values()
else:
raise NotImplementedError("Type %s is not support" % type(func))
for f in funcs:
if f not in ["sum", "prod", "min", "max"]:
raise NotImplementedError(
"Aggregation function %s has not been supported" % f
)
in_df = groupby.inputs[0]
agg_op = DataFrameGroupByAgg(
func=func,
by=groupby.op.by,
method=method,
as_index=groupby.op.as_index,
sort=groupby.op.sort,
)
return agg_op(in_df)
|
def agg(groupby, func, method="tree"):
"""
Aggregate using one or more operations on grouped data.
:param groupby: Groupby data.
:param func: Aggregation functions.
:param method: 'shuffle' or 'tree', 'tree' method provide a better performance, 'shuffle' is recommended
if aggregated result is very large.
:return: Aggregated result.
"""
# When perform a computation on the grouped data, we won't shuffle
# the data in the stage of groupby and do shuffle after aggregation.
if not isinstance(groupby, GROUPBY_TYPE):
raise TypeError("Input should be type of groupby, not %s" % type(groupby))
elif isinstance(func, list):
raise NotImplementedError("Function list is not supported now.")
if method not in ["shuffle", "tree"]:
raise NotImplementedError("Method %s has not been implemented" % method)
if isinstance(func, six.string_types):
funcs = [func]
elif isinstance(func, dict):
funcs = func.values()
else:
raise NotImplementedError("Type %s is not support" % type(func))
for f in funcs:
if f not in ["sum", "prod", "min", "max"]:
raise NotImplementedError(
"Aggregation function %s has not been supported" % f
)
in_df = groupby.inputs[0]
agg_op = DataFrameGroupByAgg(
func=func, by=groupby.op.by, method=method, as_index=groupby.op.as_index
)
return agg_op(in_df)
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def __init__(
self, by=None, as_index=None, sort=None, object_type=ObjectType.groupby, **kw
):
super(DataFrameGroupByOperand, self).__init__(
_by=by, _as_index=as_index, _sort=sort, _object_type=object_type, **kw
)
|
def __init__(self, by=None, as_index=None, object_type=ObjectType.groupby, **kw):
super(DataFrameGroupByOperand, self).__init__(
_by=by, _as_index=as_index, _object_type=object_type, **kw
)
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def dataframe_groupby(df, by, as_index=True, sort=True):
if isinstance(by, six.string_types):
by = [by]
op = DataFrameGroupByOperand(by=by, as_index=as_index, sort=sort)
return op(df)
|
def dataframe_groupby(df, by, as_index=True):
if isinstance(by, six.string_types):
by = [by]
op = DataFrameGroupByOperand(by=by, as_index=as_index)
return op(df)
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def execute(cls, ctx, op):
def _base_concat(chunk, inputs):
# auto generated concat when executing a DataFrame, Series or Index
if chunk.op.object_type == ObjectType.dataframe:
return _auto_concat_dataframe_chunks(chunk, inputs)
elif chunk.op.object_type == ObjectType.series:
return _auto_concat_series_chunks(chunk, inputs)
else:
raise TypeError(
"Only DataFrameChunk, SeriesChunk and IndexChunk "
"can be automatically concatenated"
)
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
# cuDF will lost index name when concat two seriess.
ret.index.name = concats[0].index.name
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns_value, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
def _auto_concat_series_chunks(chunk, inputs):
# auto generated concat when executing a Series
if all(np.isscalar(inp) for inp in inputs):
return pd.Series(inputs)
else:
xdf = pd if isinstance(inputs[0], pd.Series) else cudf
if chunk.op.axis is not None:
concat = xdf.concat(inputs, axis=chunk.op.axis)
else:
concat = xdf.concat(inputs)
if getattr(chunk.index_value, "should_be_monotonic", False):
concat.sort_index(inplace=True)
return concat
chunk = op.outputs[0]
inputs = [ctx[input.key] for input in op.inputs]
if isinstance(inputs[0], tuple):
ctx[chunk.key] = tuple(
_base_concat(chunk, [input[i] for input in inputs])
for i in range(len(inputs[0]))
)
else:
ctx[chunk.key] = _base_concat(chunk, inputs)
|
def execute(cls, ctx, op):
def _base_concat(chunk, inputs):
# auto generated concat when executing a DataFrame, Series or Index
if chunk.op.object_type == ObjectType.dataframe:
return _auto_concat_dataframe_chunks(chunk, inputs)
elif chunk.op.object_type == ObjectType.series:
return _auto_concat_series_chunks(chunk, inputs)
else:
raise TypeError(
"Only DataFrameChunk, SeriesChunk and IndexChunk "
"can be automatically concatenated"
)
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns_value, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
def _auto_concat_series_chunks(chunk, inputs):
# auto generated concat when executing a Series
if all(np.isscalar(inp) for inp in inputs):
return pd.Series(inputs)
else:
xdf = pd if isinstance(inputs[0], pd.Series) else cudf
if chunk.op.axis is not None:
concat = xdf.concat(inputs, axis=chunk.op.axis)
else:
concat = xdf.concat(inputs)
if getattr(chunk.index_value, "should_be_monotonic", False):
concat.sort_index(inplace=True)
return concat
chunk = op.outputs[0]
inputs = [ctx[input.key] for input in op.inputs]
if isinstance(inputs[0], tuple):
ctx[chunk.key] = tuple(
_base_concat(chunk, [input[i] for input in inputs])
for i in range(len(inputs[0]))
)
else:
ctx[chunk.key] = _base_concat(chunk, inputs)
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
# cuDF will lost index name when concat two seriess.
ret.index.name = concats[0].index.name
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns_value, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
|
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns_value, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
|
https://github.com/mars-project/mars/issues/852
|
In [5]: df = md.read_csv('/home/xuye.qin/kaisheng.hks/G1_1e8_1e2_0_0.csv', gpu=True)
In [6]: _ = df.execute()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-6-bdc0e119412a> in <module>
----> 1 _ = df.execute()
~/kaisheng.hks/mars/mars/core.py in execute(self, session, **kw)
443 if session is None:
444 session = Session.default_or_local()
--> 445 return session.run(self, **kw)
446
447 def fetch(self, session=None, **kw):
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
179 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
180 for t in tileables)
--> 181 result = self._sess.run(*tileables, **kw)
182
183 for t in tileables:
~/kaisheng.hks/mars/mars/session.py in run(self, *tileables, **kw)
88 # set number of running cores
89 self.context.set_ncores(kw['n_parallel'])
---> 90 res = self._executor.execute_tileables(tileables, **kw)
91 return res
92
~/kaisheng.hks/mars/mars/utils.py in _wrapped(*args, **kwargs)
384 _kernel_mode.eager = False
385 _kernel_mode.eager_count = enter_eager_count + 1
--> 386 return func(*args, **kwargs)
387 finally:
388 _kernel_mode.eager_count -= 1
~/kaisheng.hks/mars/mars/utils.py in inner(*args, **kwargs)
471 def inner(*args, **kwargs):
472 with build_mode():
--> 473 return func(*args, **kwargs)
474 return inner
475
~/kaisheng.hks/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
734 self.execute_graph(chunk_graph, list(temp_result_keys), n_parallel=n_parallel or n_thread,
735 print_progress=print_progress, mock=mock,
--> 736 chunk_result=chunk_result)
737 if chunk_graph_builder.done:
738 if len(intermediate_result_keys) > 0:
~/kaisheng.hks/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
610 mock=mock, mock_max_memory=self._mock_max_memory,
611 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 612 res = graph_execution.execute(retval)
613 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
614 if mock:
~/kaisheng.hks/mars/mars/executor.py in execute(self, retval)
495 # wait until all the futures completed
496 for future in executed_futures:
--> 497 future.result()
498
499 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/kaisheng.hks/mars/mars/executor.py in _execute_operand(self, op)
372 # so we pass the first operand's first output to Executor.handle
373 first_op = ops[0]
--> 374 Executor.handle(first_op, results, self._mock)
375
376 # update maximal memory usage during execution
~/kaisheng.hks/mars/mars/executor.py in handle(cls, op, results, mock)
559 try:
560 if UFuncTypeError is None:
--> 561 return runner(results, op)
562 else:
563 # Cast `UFuncTypeError` to `TypeError` since subclasses of the former is unpickleable.
~/kaisheng.hks/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op)
209 csv_kwargs['skiprows'] = 1 if op.header == 'infer' else op.header
210 df = xdf.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col,
--> 211 dtype=out_df.dtypes.to_dict(), **csv_kwargs)
212 ctx[out_df.key] = df
213
~/miniconda3/lib/python3.7/site-packages/cudf/io/csv.py in read_csv(filepath_or_buffer, lineterminator, quotechar, quoting, doublequote, header, mangle_dupe_cols, usecols, sep, delimiter, delim_whitespace, skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst, compression, thousands, decimal, true_values, false_values, nrows, byte_range, skip_blank_lines, parse_dates, comment, na_values, keep_default_na, na_filter, prefix, index_col)
79 na_filter=na_filter,
80 prefix=prefix,
---> 81 index_col=index_col,
82 )
83
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
cudf/bindings/csv.pyx in cudf.bindings.csv.cpp_read_csv()
RuntimeError: cuDF failure at: /conda/conda-bld/libcudf_1566414954289/work/cpp/src/io/csv/csv_reader_impl.cu:624: Unsupported data type
|
RuntimeError
|
def _create_chunk(self, output_idx, index, **kw):
inputs = self.inputs
if kw.get("index_value", None) is None and inputs[0].index_value is not None:
input_index_value = inputs[0].index_value
index_min_max = self.index_min_max
if index_min_max is not None:
kw["index_value"] = filter_index_value(input_index_value, index_min_max)
else:
kw["index_value"] = parse_index(
inputs[0].index_value.to_pandas(),
key=tokenize(input_index_value.key, type(self).__name__),
)
if (
kw.get("columns_value", None) is None
and getattr(inputs[0], "columns_value", None) is not None
):
input_columns_value = inputs[0].columns_value
input_dtypes = inputs[0].dtypes
column_min_max = self.column_min_max
if column_min_max is not None:
kw["columns_value"] = filter_index_value(
input_columns_value, column_min_max, store_data=True
)
else:
kw["columns_value"] = parse_index(
inputs[0].columns_value.to_pandas(),
key=tokenize(input_columns_value.key, type(self).__name__),
)
kw["dtypes"] = input_dtypes[kw["columns_value"].to_pandas()]
column_shuffle_size = self.column_shuffle_size
if column_shuffle_size is not None:
self._column_shuffle_segments = hash_dtypes(
input_dtypes, column_shuffle_size
)
if kw.get("dtype", None) and getattr(inputs[0], "dtype", None) is not None:
kw["dtype"] = inputs[0].dtype
if kw.get("name", None) and getattr(inputs[0], "name", None) is not None:
kw["name"] = inputs[0].dtype
return super(DataFrameIndexAlignMap, self)._create_chunk(output_idx, index, **kw)
|
def _create_chunk(self, output_idx, index, **kw):
inputs = self.inputs
if kw.get("index_value", None) is None and inputs[0].index_value is not None:
input_index_value = inputs[0].index_value
index_min_max = self.index_min_max
if index_min_max is not None:
kw["index_value"] = filter_index_value(input_index_value, index_min_max)
else:
kw["index_value"] = parse_index(
inputs[0].index_value.to_pandas(),
key=tokenize(input_index_value.key, type(self).__name__),
)
if (
kw.get("columns_value", None) is None
and getattr(inputs[0], "columns", None) is not None
):
input_columns_value = inputs[0].columns
input_dtypes = inputs[0].dtypes
column_min_max = self.column_min_max
if column_min_max is not None:
kw["columns_value"] = filter_index_value(
input_columns_value, column_min_max, store_data=True
)
else:
kw["columns_value"] = parse_index(
inputs[0].columns.to_pandas(),
key=tokenize(input_columns_value.key, type(self).__name__),
)
kw["dtypes"] = input_dtypes[kw["columns_value"].to_pandas()]
column_shuffle_size = self.column_shuffle_size
if column_shuffle_size is not None:
self._column_shuffle_segments = hash_dtypes(
input_dtypes, column_shuffle_size
)
if kw.get("dtype", None) and getattr(inputs[0], "dtype", None) is not None:
kw["dtype"] = inputs[0].dtype
if kw.get("name", None) and getattr(inputs[0], "name", None) is not None:
kw["name"] = inputs[0].dtype
return super(DataFrameIndexAlignMap, self)._create_chunk(output_idx, index, **kw)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _create_chunk(self, output_idx, index, **kw):
inputs = self.inputs
if (
kw.get("index_value", None) is None
and inputs[0].inputs[0].index_value is not None
):
index_align_map_chunks = inputs[0].inputs
if index_align_map_chunks[0].op.index_min_max is not None:
# shuffle on columns, all the DataFrameIndexAlignMap has the same index
kw["index_value"] = filter_index_value(
index_align_map_chunks[0].index_value,
index_align_map_chunks[0].op.index_min_max,
)
else:
# shuffle on index
kw["index_value"] = parse_index(
index_align_map_chunks[0].index_value.to_pandas(),
key=tokenize(
[c.key for c in index_align_map_chunks], type(self).__name__
),
)
if (
kw.get("columns_value", None) is None
and getattr(inputs[0].inputs[0], "columns_value", None) is not None
):
index_align_map_chunks = inputs[0].inputs
if index_align_map_chunks[0].op.column_min_max is not None:
# shuffle on index
kw["columns_value"] = filter_index_value(
index_align_map_chunks[0].columns_value,
index_align_map_chunks[0].op.column_min_max,
store_data=True,
)
kw["dtypes"] = index_align_map_chunks[0].dtypes[
kw["columns_value"].to_pandas()
]
else:
# shuffle on columns
all_dtypes = [
c.op.column_shuffle_segments[index[1]]
for c in index_align_map_chunks
if c.index[0] == index_align_map_chunks[0].index[0]
]
kw["dtypes"] = pd.concat(all_dtypes)
kw["columns_value"] = parse_index(kw["dtypes"].index, store_data=True)
if (
kw.get("dtype", None)
and getattr(inputs[0].inputs[0], "dtype", None) is not None
):
kw["dtype"] = inputs[0].inputs[0].dtype
if kw.get("name", None) and getattr(inputs[0].inputs[0], "name", None) is not None:
kw["name"] = inputs[0].inputs[0].dtype
return super(DataFrameIndexAlignReduce, self)._create_chunk(output_idx, index, **kw)
|
def _create_chunk(self, output_idx, index, **kw):
inputs = self.inputs
if (
kw.get("index_value", None) is None
and inputs[0].inputs[0].index_value is not None
):
index_align_map_chunks = inputs[0].inputs
if index_align_map_chunks[0].op.index_min_max is not None:
# shuffle on columns, all the DataFrameIndexAlignMap has the same index
kw["index_value"] = filter_index_value(
index_align_map_chunks[0].index_value,
index_align_map_chunks[0].op.index_min_max,
)
else:
# shuffle on index
kw["index_value"] = parse_index(
index_align_map_chunks[0].index_value.to_pandas(),
key=tokenize(
[c.key for c in index_align_map_chunks], type(self).__name__
),
)
if (
kw.get("columns_value", None) is None
and getattr(inputs[0].inputs[0], "columns", None) is not None
):
index_align_map_chunks = inputs[0].inputs
if index_align_map_chunks[0].op.column_min_max is not None:
# shuffle on index
kw["columns_value"] = filter_index_value(
index_align_map_chunks[0].columns,
index_align_map_chunks[0].op.column_min_max,
store_data=True,
)
kw["dtypes"] = index_align_map_chunks[0].dtypes[
kw["columns_value"].to_pandas()
]
else:
# shuffle on columns
all_dtypes = [
c.op.column_shuffle_segments[index[1]]
for c in index_align_map_chunks
if c.index[0] == index_align_map_chunks[0].index[0]
]
kw["dtypes"] = pd.concat(all_dtypes)
kw["columns_value"] = parse_index(kw["dtypes"].index, store_data=True)
if (
kw.get("dtype", None)
and getattr(inputs[0].inputs[0], "dtype", None) is not None
):
kw["dtype"] = inputs[0].inputs[0].dtype
if kw.get("name", None) and getattr(inputs[0].inputs[0], "name", None) is not None:
kw["name"] = inputs[0].inputs[0].dtype
return super(DataFrameIndexAlignReduce, self)._create_chunk(output_idx, index, **kw)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _get_chunk_index_min_max(index_chunks):
chunk_index_min_max = []
for chunk in index_chunks:
min_val = chunk.min_val
min_val_close = chunk.min_val_close
max_val = chunk.max_val
max_val_close = chunk.max_val_close
if min_val is None or max_val is None:
return
chunk_index_min_max.append((min_val, min_val_close, max_val, max_val_close))
return chunk_index_min_max
|
def _get_chunk_index_min_max(index, index_chunks):
chunk_index_min_max = []
for chunk in index_chunks:
min_val = chunk.min_val
min_val_close = chunk.min_val_close
max_val = chunk.max_val
max_val_close = chunk.max_val_close
if min_val is None or max_val is None:
return
chunk_index_min_max.append((min_val, min_val_close, max_val, max_val_close))
if index.is_monotonic_decreasing:
return list(reversed(chunk_index_min_max)), False
if _check_overlap(chunk_index_min_max):
return
return chunk_index_min_max, True
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _need_align_map(
input_chunk,
index_min_max,
column_min_max,
dummy_index_splits=False,
dummy_column_splits=False,
):
if not dummy_index_splits:
assert not pd.isnull(index_min_max[0]) and not pd.isnull(index_min_max[2])
if isinstance(input_chunk, SERIES_CHUNK_TYPE):
if input_chunk.index_value is None:
return True
if input_chunk.index_value.min_max != index_min_max:
return True
else:
if not dummy_index_splits:
if (
input_chunk.index_value is None
or input_chunk.index_value.min_max != index_min_max
):
return True
if not dummy_column_splits:
if (
input_chunk.columns_value is None
or input_chunk.columns_value.min_max != column_min_max
):
return True
return False
|
def _need_align_map(
input_chunk,
index_min_max,
column_min_max,
dummy_index_splits=False,
dummy_column_splits=False,
):
if not dummy_index_splits:
assert not pd.isnull(index_min_max[0]) and not pd.isnull(index_min_max[2])
if isinstance(input_chunk, SERIES_CHUNK_TYPE):
if input_chunk.index_value is None:
return True
if input_chunk.index_value.min_max != index_min_max:
return True
else:
if not dummy_index_splits:
if (
input_chunk.index_value is None
or input_chunk.index_value.min_max != index_min_max
):
return True
if not dummy_column_splits:
if (
input_chunk.columns is None
or input_chunk.columns.min_max != column_min_max
):
return True
return False
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _calc_axis_splits(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
if _axis_need_shuffle(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
# do shuffle
out_chunk_size = max(len(left_axis_chunks), len(right_axis_chunks))
return None, [np.nan for _ in range(out_chunk_size)]
else:
# no need to do shuffle on this axis
if _is_index_identical(left_axis_chunks, right_axis_chunks):
left_chunk_index_min_max = _get_chunk_index_min_max(left_axis_chunks)
right_splits = left_splits = [[c] for c in left_chunk_index_min_max]
right_increase = left_increase = None
elif len(left_axis_chunks) == 1 and len(right_axis_chunks) == 1:
left_splits = [_get_chunk_index_min_max(left_axis_chunks)]
left_increase = left_axis_chunks[0].is_monotonic_decreasing
right_splits = [_get_chunk_index_min_max(right_axis_chunks)]
right_increase = right_axis_chunks[0].is_monotonic_decreasing
else:
left_chunk_index_min_max, left_increase = (
_get_monotonic_chunk_index_min_max(left_axis, left_axis_chunks)
)
right_chunk_index_min_max, right_increase = (
_get_monotonic_chunk_index_min_max(right_axis, right_axis_chunks)
)
left_splits, right_splits = split_monotonic_index_min_max(
left_chunk_index_min_max,
left_increase,
right_chunk_index_min_max,
right_increase,
)
splits = _AxisMinMaxSplitInfo(
left_splits, left_increase, right_splits, right_increase
)
nsplits = [np.nan for _ in itertools.chain(*left_splits)]
return splits, nsplits
|
def _calc_axis_splits(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
if _axis_need_shuffle(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
# do shuffle
out_chunk_size = max(len(left_axis_chunks), len(right_axis_chunks))
return None, [np.nan for _ in range(out_chunk_size)]
else:
# no need to do shuffle on this axis
left_chunk_index_min_max, left_increase = _get_chunk_index_min_max(
left_axis, left_axis_chunks
)
right_chunk_index_min_max, right_increase = _get_chunk_index_min_max(
right_axis, right_axis_chunks
)
if len(left_chunk_index_min_max) == 1 and len(right_chunk_index_min_max) == 1:
# both left and right has only 1 chunk
left_splits, right_splits = (
[left_chunk_index_min_max],
[right_chunk_index_min_max],
)
else:
left_splits, right_splits = split_monotonic_index_min_max(
left_chunk_index_min_max,
left_increase,
right_chunk_index_min_max,
right_increase,
)
splits = _AxisMinMaxSplitInfo(
left_splits, left_increase, right_splits, right_increase
)
nsplits = [np.nan for _ in itertools.chain(*left_splits)]
return splits, nsplits
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _gen_dataframe_chunks(splits, out_shape, left_or_right, df):
out_chunks = []
if splits.all_axes_can_split():
# no shuffle for all axes
kw = {
"index_shuffle_size": -1 if splits[0].isdummy() else None,
"column_shuffle_size": -1 if splits[1].isdummy() else None,
}
for out_idx in itertools.product(*(range(s) for s in out_shape)):
row_idx = splits.get_axis_idx(0, left_or_right, out_idx[0])
col_idx = splits.get_axis_idx(1, left_or_right, out_idx[1])
index_min_max = splits.get_axis_split(0, left_or_right, out_idx[0])
column_min_max = splits.get_axis_split(1, left_or_right, out_idx[1])
chunk = df.cix[row_idx, col_idx]
if _need_align_map(
chunk,
index_min_max,
column_min_max,
splits[0].isdummy(),
splits[1].isdummy(),
):
if splits[1].isdummy():
dtypes = chunk.dtypes
else:
dtypes = filter_dtypes(chunk.dtypes, column_min_max)
chunk_kw = {
"index_value": chunk.index_value if splits[0].isdummy() else None,
"columns_value": chunk.columns_value
if splits[1].isdummy()
else None,
}
align_op = DataFrameIndexAlignMap(
index_min_max=index_min_max,
column_min_max=column_min_max,
dtypes=dtypes,
sparse=chunk.issparse(),
object_type=ObjectType.dataframe,
**kw,
)
out_chunk = align_op.new_chunk(
[chunk], shape=(np.nan, np.nan), index=out_idx, **chunk_kw
)
else:
out_chunk = chunk
out_chunks.append(out_chunk)
elif splits.one_axis_can_split():
# one axis needs shuffle
shuffle_axis = 0 if splits[0] is None else 1
align_axis = 1 - shuffle_axis
for align_axis_idx in range(out_shape[align_axis]):
if align_axis == 0:
kw = {
"index_min_max": splits.get_axis_split(
align_axis, left_or_right, align_axis_idx
),
"index_shuffle_size": -1 if splits[0].isdummy() else None,
"column_shuffle_size": out_shape[shuffle_axis],
}
input_idx = splits.get_axis_idx(
align_axis, left_or_right, align_axis_idx
)
else:
kw = {
"column_min_max": splits.get_axis_split(
align_axis, left_or_right, align_axis_idx
),
"index_shuffle_size": out_shape[shuffle_axis],
"column_shuffle_size": -1 if splits[1].isdummy() else None,
}
input_idx = splits.get_axis_idx(
align_axis, left_or_right, align_axis_idx
)
input_chunks = [c for c in df.chunks if c.index[align_axis] == input_idx]
map_chunks = []
for j, input_chunk in enumerate(input_chunks):
chunk_kw = dict()
if align_axis == 0:
chunk_kw["index_value"] = (
input_chunk.index_value if splits[0].isdummy() else None
)
else:
chunk_kw["columns_value"] = (
input_chunk.columns_value if splits[1].isdummy() else None
)
map_op = DataFrameIndexAlignMap(
sparse=input_chunk.issparse(),
object_type=ObjectType.dataframe,
**kw,
)
idx = [None, None]
idx[align_axis] = align_axis_idx
idx[shuffle_axis] = j
map_chunks.append(
map_op.new_chunk(
[input_chunk],
shape=(np.nan, np.nan),
index=tuple(idx),
**chunk_kw,
)
)
proxy_chunk = DataFrameShuffleProxy(
sparse=df.issparse(), object_type=ObjectType.dataframe
).new_chunk(map_chunks, shape=())
for j in range(out_shape[shuffle_axis]):
chunk_kw = dict()
if align_axis == 0:
chunk_kw["index_value"] = (
proxy_chunk.inputs[0].inputs[0].index_value
if splits[0].isdummy()
else None
)
else:
chunk_kw["columns_value"] = (
proxy_chunk.inputs[0].inputs[0].columns_value
if splits[1].isdummy()
else None
)
reduce_idx = (
(align_axis_idx, j) if align_axis == 0 else (j, align_axis_idx)
)
reduce_op = DataFrameIndexAlignReduce(
i=j,
sparse=proxy_chunk.issparse(),
shuffle_key=",".join(str(idx) for idx in reduce_idx),
object_type=ObjectType.dataframe,
)
out_chunks.append(
reduce_op.new_chunk(
[proxy_chunk],
shape=(np.nan, np.nan),
index=reduce_idx,
**chunk_kw,
)
)
out_chunks.sort(key=lambda c: c.index)
else:
# all axes need shuffle
assert splits.no_axis_can_split()
# gen map chunks
map_chunks = []
for chunk in df.chunks:
map_op = DataFrameIndexAlignMap(
sparse=chunk.issparse(),
index_shuffle_size=out_shape[0],
column_shuffle_size=out_shape[1],
object_type=ObjectType.dataframe,
)
map_chunks.append(
map_op.new_chunk([chunk], shape=(np.nan, np.nan), index=chunk.index)
)
proxy_chunk = DataFrameShuffleProxy(object_type=ObjectType.dataframe).new_chunk(
map_chunks, shape=()
)
# gen reduce chunks
for out_idx in itertools.product(*(range(s) for s in out_shape)):
reduce_op = DataFrameIndexAlignReduce(
i=out_idx,
sparse=proxy_chunk.issparse(),
shuffle_key=",".join(str(idx) for idx in out_idx),
object_type=ObjectType.dataframe,
)
out_chunks.append(
reduce_op.new_chunk(
[proxy_chunk], shape=(np.nan, np.nan), index=out_idx
)
)
return out_chunks
|
def _gen_dataframe_chunks(splits, out_shape, left_or_right, df):
out_chunks = []
if splits.all_axes_can_split():
# no shuffle for all axes
kw = {
"index_shuffle_size": -1 if splits[0].isdummy() else None,
"column_shuffle_size": -1 if splits[1].isdummy() else None,
}
for out_idx in itertools.product(*(range(s) for s in out_shape)):
row_idx = splits.get_axis_idx(0, left_or_right, out_idx[0])
col_idx = splits.get_axis_idx(1, left_or_right, out_idx[1])
index_min_max = splits.get_axis_split(0, left_or_right, out_idx[0])
column_min_max = splits.get_axis_split(1, left_or_right, out_idx[1])
chunk = df.cix[row_idx, col_idx]
if _need_align_map(
chunk,
index_min_max,
column_min_max,
splits[0].isdummy(),
splits[1].isdummy(),
):
if splits[1].isdummy():
dtypes = chunk.dtypes
else:
dtypes = filter_dtypes(chunk.dtypes, column_min_max)
chunk_kw = {
"index_value": chunk.index_value if splits[0].isdummy() else None,
"columns_value": chunk.columns if splits[1].isdummy() else None,
}
align_op = DataFrameIndexAlignMap(
index_min_max=index_min_max,
column_min_max=column_min_max,
dtypes=dtypes,
sparse=chunk.issparse(),
object_type=ObjectType.dataframe,
**kw,
)
out_chunk = align_op.new_chunk(
[chunk], shape=(np.nan, np.nan), index=out_idx, **chunk_kw
)
else:
out_chunk = chunk
out_chunks.append(out_chunk)
elif splits.one_axis_can_split():
# one axis needs shuffle
shuffle_axis = 0 if splits[0] is None else 1
align_axis = 1 - shuffle_axis
for align_axis_idx in range(out_shape[align_axis]):
if align_axis == 0:
kw = {
"index_min_max": splits.get_axis_split(
align_axis, left_or_right, align_axis_idx
),
"index_shuffle_size": -1 if splits[0].isdummy() else None,
"column_shuffle_size": out_shape[shuffle_axis],
}
input_idx = splits.get_axis_idx(
align_axis, left_or_right, align_axis_idx
)
else:
kw = {
"column_min_max": splits.get_axis_split(
align_axis, left_or_right, align_axis_idx
),
"index_shuffle_size": out_shape[shuffle_axis],
"column_shuffle_size": -1 if splits[1].isdummy() else None,
}
input_idx = splits.get_axis_idx(
align_axis, left_or_right, align_axis_idx
)
input_chunks = [c for c in df.chunks if c.index[align_axis] == input_idx]
map_chunks = []
for j, input_chunk in enumerate(input_chunks):
chunk_kw = dict()
if align_axis == 0:
chunk_kw["index_value"] = (
input_chunk.index_value if splits[0].isdummy() else None
)
else:
chunk_kw["columns_value"] = (
input_chunk.columns if splits[1].isdummy() else None
)
map_op = DataFrameIndexAlignMap(
sparse=input_chunk.issparse(),
object_type=ObjectType.dataframe,
**kw,
)
idx = [None, None]
idx[align_axis] = align_axis_idx
idx[shuffle_axis] = j
map_chunks.append(
map_op.new_chunk(
[input_chunk],
shape=(np.nan, np.nan),
index=tuple(idx),
**chunk_kw,
)
)
proxy_chunk = DataFrameShuffleProxy(
sparse=df.issparse(), object_type=ObjectType.dataframe
).new_chunk(map_chunks, shape=())
for j in range(out_shape[shuffle_axis]):
chunk_kw = dict()
if align_axis == 0:
chunk_kw["index_value"] = (
proxy_chunk.inputs[0].inputs[0].index_value
if splits[0].isdummy()
else None
)
else:
chunk_kw["columns_value"] = (
proxy_chunk.inputs[0].inputs[0].columns
if splits[1].isdummy()
else None
)
reduce_idx = (
(align_axis_idx, j) if align_axis == 0 else (j, align_axis_idx)
)
reduce_op = DataFrameIndexAlignReduce(
i=j,
sparse=proxy_chunk.issparse(),
shuffle_key=",".join(str(idx) for idx in reduce_idx),
object_type=ObjectType.dataframe,
)
out_chunks.append(
reduce_op.new_chunk(
[proxy_chunk],
shape=(np.nan, np.nan),
index=reduce_idx,
**chunk_kw,
)
)
out_chunks.sort(key=lambda c: c.index)
else:
# all axes need shuffle
assert splits.no_axis_can_split()
# gen map chunks
map_chunks = []
for chunk in df.chunks:
map_op = DataFrameIndexAlignMap(
sparse=chunk.issparse(),
index_shuffle_size=out_shape[0],
column_shuffle_size=out_shape[1],
object_type=ObjectType.dataframe,
)
map_chunks.append(
map_op.new_chunk([chunk], shape=(np.nan, np.nan), index=chunk.index)
)
proxy_chunk = DataFrameShuffleProxy(object_type=ObjectType.dataframe).new_chunk(
map_chunks, shape=()
)
# gen reduce chunks
for out_idx in itertools.product(*(range(s) for s in out_shape)):
reduce_op = DataFrameIndexAlignReduce(
i=out_idx,
sparse=proxy_chunk.issparse(),
shuffle_key=",".join(str(idx) for idx in out_idx),
object_type=ObjectType.dataframe,
)
out_chunks.append(
reduce_op.new_chunk(
[proxy_chunk], shape=(np.nan, np.nan), index=out_idx
)
)
return out_chunks
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def align_dataframe_dataframe(left, right):
left_index_chunks = [c.index_value for c in left.cix[:, 0]]
left_columns_chunks = [c.columns_value for c in left.cix[0, :]]
right_index_chunks = [c.index_value for c in right.cix[:, 0]]
right_columns_chunks = [c.columns_value for c in right.cix[0, :]]
index_splits, index_nsplits = _calc_axis_splits(
left.index_value, right.index_value, left_index_chunks, right_index_chunks
)
columns_splits, columns_nsplits = _calc_axis_splits(
left.columns_value,
right.columns_value,
left_columns_chunks,
right_columns_chunks,
)
nsplits = [index_nsplits, columns_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
splits = _MinMaxSplitInfo(index_splits, columns_splits)
left_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 0, left)
right_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 1, right)
return nsplits, out_chunk_shape, left_chunks, right_chunks
|
def align_dataframe_dataframe(left, right):
left_index_chunks = [c.index_value for c in left.cix[:, 0]]
left_columns_chunks = [c.columns for c in left.cix[0, :]]
right_index_chunks = [c.index_value for c in right.cix[:, 0]]
right_columns_chunks = [c.columns for c in right.cix[0, :]]
index_splits, index_nsplits = _calc_axis_splits(
left.index_value, right.index_value, left_index_chunks, right_index_chunks
)
columns_splits, columns_nsplits = _calc_axis_splits(
left.columns, right.columns, left_columns_chunks, right_columns_chunks
)
nsplits = [index_nsplits, columns_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
splits = _MinMaxSplitInfo(index_splits, columns_splits)
left_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 0, left)
right_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 1, right)
return nsplits, out_chunk_shape, left_chunks, right_chunks
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def align_dataframe_series(left, right, axis="columns"):
if axis == "columns" or axis == 1:
left_columns_chunks = [c.columns_value for c in left.cix[0, :]]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(
left.columns_value,
right.index_value,
left_columns_chunks,
right_index_chunks,
)
dummy_splits, dummy_nsplits = (
_build_dummy_axis_split(left.chunk_shape[0]),
left.nsplits[0],
)
nsplits = [dummy_nsplits, index_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
left_chunks = _gen_dataframe_chunks(
_MinMaxSplitInfo(dummy_splits, index_splits), out_chunk_shape, 0, left
)
right_chunks = _gen_series_chunks(
_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[1],), 1, right
)
else:
assert axis == "index" or axis == 0
left_index_chunks = [c.index_value for c in left.cix[:, 0]]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(
left.index_value, right.index_value, left_index_chunks, right_index_chunks
)
dummy_splits, dummy_nsplits = (
_build_dummy_axis_split(left.chunk_shape[1]),
left.nsplits[1],
)
nsplits = [index_nsplits, dummy_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
left_chunks = _gen_dataframe_chunks(
_MinMaxSplitInfo(index_splits, dummy_splits), out_chunk_shape, 0, left
)
right_chunks = _gen_series_chunks(
_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[0],), 1, right
)
return nsplits, out_chunk_shape, left_chunks, right_chunks
|
def align_dataframe_series(left, right, axis="columns"):
if axis == "columns" or axis == 1:
left_columns_chunks = [c.columns for c in left.cix[0, :]]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(
left.columns, right.index_value, left_columns_chunks, right_index_chunks
)
dummy_splits, dummy_nsplits = (
_build_dummy_axis_split(left.chunk_shape[0]),
left.nsplits[0],
)
nsplits = [dummy_nsplits, index_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
left_chunks = _gen_dataframe_chunks(
_MinMaxSplitInfo(dummy_splits, index_splits), out_chunk_shape, 0, left
)
right_chunks = _gen_series_chunks(
_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[1],), 1, right
)
else:
assert axis == "index" or axis == 0
left_index_chunks = [c.index_value for c in left.cix[:, 0]]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(
left.index_value, right.index_value, left_index_chunks, right_index_chunks
)
dummy_splits, dummy_nsplits = (
_build_dummy_axis_split(left.chunk_shape[1]),
left.nsplits[1],
)
nsplits = [index_nsplits, dummy_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
left_chunks = _gen_dataframe_chunks(
_MinMaxSplitInfo(index_splits, dummy_splits), out_chunk_shape, 0, left
)
right_chunks = _gen_series_chunks(
_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[0],), 1, right
)
return nsplits, out_chunk_shape, left_chunks, right_chunks
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def add(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
return op(df, other)
|
def add(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
return op(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def radd(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df)
return op.rcall(df, other)
|
def radd(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df)
return op.rcall(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _tile_both_dataframes(cls, op):
# if both of the inputs are DataFrames, axis is just ignored
left, right = op.lhs, op.rhs
df = op.outputs[0]
nsplits, out_shape, left_chunks, right_chunks = align_dataframe_dataframe(
left, right
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for idx, left_chunk, right_chunk in zip(
out_chunk_indexes, left_chunks, right_chunks
):
out_chunk = (
op.copy()
.reset_key()
.new_chunk([left_chunk, right_chunk], shape=(np.nan, np.nan), index=idx)
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def _tile_both_dataframes(cls, op):
# if both of the inputs are DataFrames, axis is just ignored
left, right = op.lhs, op.rhs
df = op.outputs[0]
nsplits, out_shape, left_chunks, right_chunks = align_dataframe_dataframe(
left, right
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for idx, left_chunk, right_chunk in zip(
out_chunk_indexes, left_chunks, right_chunks
):
out_chunk = (
op.copy()
.reset_key()
.new_chunk([left_chunk, right_chunk], shape=(np.nan, np.nan), index=idx)
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _tile_dataframe_series(cls, op):
left, right = op.lhs, op.rhs
df = op.outputs[0]
nsplits, out_shape, left_chunks, right_chunks = align_dataframe_series(
left, right, axis=op.axis
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for out_idx, df_chunk in zip(out_chunk_indexes, left_chunks):
if op.axis == "columns" or op.axis == 1:
series_chunk = right_chunks[out_idx[1]]
kw = {
"shape": (df_chunk.shape[0], np.nan),
"index_value": df_chunk.index_value,
}
else:
series_chunk = right_chunks[out_idx[0]]
kw = {
"shape": (np.nan, df_chunk.shape[1]),
"columns_value": df_chunk.columns_value,
}
out_chunk = (
op.copy()
.reset_key()
.new_chunk([df_chunk, series_chunk], index=out_idx, **kw)
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def _tile_dataframe_series(cls, op):
left, right = op.lhs, op.rhs
df = op.outputs[0]
nsplits, out_shape, left_chunks, right_chunks = align_dataframe_series(
left, right, axis=op.axis
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for out_idx, df_chunk in zip(out_chunk_indexes, left_chunks):
if op.axis == "columns" or op.axis == 1:
series_chunk = right_chunks[out_idx[1]]
kw = {
"shape": (df_chunk.shape[0], np.nan),
"index_value": df_chunk.index_value,
}
else:
series_chunk = right_chunks[out_idx[0]]
kw = {
"shape": (np.nan, df_chunk.shape[1]),
"columns_value": df_chunk.columns,
}
out_chunk = (
op.copy()
.reset_key()
.new_chunk([df_chunk, series_chunk], index=out_idx, **kw)
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _tile_series_dataframe(cls, op):
left, right = op.lhs, op.rhs
df = op.outputs[0]
nsplits, out_shape, right_chunks, left_chunks = align_dataframe_series(
right, left, axis=op.axis
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for out_idx, df_chunk in zip(out_chunk_indexes, right_chunks):
if op.axis == "columns" or op.axis == 1:
series_chunk = left_chunks[out_idx[1]]
kw = {
"shape": (df_chunk.shape[0], np.nan),
"index_value": df_chunk.index_value,
}
else:
series_chunk = left_chunks[out_idx[0]]
kw = {
"shape": (df_chunk.shape[0], np.nan),
"index_value": df_chunk.index_value,
}
out_chunk = (
op.copy()
.reset_key()
.new_chunk([series_chunk, df_chunk], index=out_idx, **kw)
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def _tile_series_dataframe(cls, op):
left, right = op.lhs, op.rhs
df = op.outputs[0]
nsplits, out_shape, right_chunks, left_chunks = align_dataframe_series(
right, left, axis=op.axis
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for out_idx, df_chunk in zip(out_chunk_indexes, right_chunks):
if op.axis == "columns" or op.axis == 1:
series_chunk = left_chunks[out_idx[1]]
kw = {
"shape": (df_chunk.shape[0], np.nan),
"index_value": df_chunk.index_value,
}
else:
series_chunk = left_chunks[out_idx[0]]
kw = {
"shape": (df_chunk.shape[0], np.nan),
"index_value": df_chunk.index_value,
}
out_chunk = (
op.copy()
.reset_key()
.new_chunk([series_chunk, df_chunk], index=out_idx, **kw)
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _tile_scalar(cls, op):
tileable = op.rhs if np.isscalar(op.lhs) else op.lhs
df = op.outputs[0]
out_chunks = []
for chunk in tileable.chunks:
out_op = op.copy().reset_key()
if isinstance(chunk, DATAFRAME_CHUNK_TYPE):
out_chunk = out_op.new_chunk(
[chunk],
shape=chunk.shape,
index=chunk.index,
dtypes=chunk.dtypes,
index_value=chunk.index_value,
columns_value=getattr(chunk, "columns_value"),
)
else:
out_chunk = out_op.new_chunk(
[chunk],
shape=chunk.shape,
index=chunk.index,
dtype=chunk.dtype,
index_value=chunk.index_value,
name=getattr(chunk, "name"),
)
out_chunks.append(out_chunk)
new_op = op.copy()
if isinstance(df, SERIES_TYPE):
return new_op.new_seriess(
op.inputs,
df.shape,
nsplits=tileable.nsplits,
dtype=df.dtype,
index_value=df.index_value,
name=df.name,
chunks=out_chunks,
)
else:
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tileable.nsplits,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
chunks=out_chunks,
)
|
def _tile_scalar(cls, op):
tileable = op.rhs if np.isscalar(op.lhs) else op.lhs
df = op.outputs[0]
out_chunks = []
for chunk in tileable.chunks:
out_op = op.copy().reset_key()
if isinstance(chunk, DATAFRAME_CHUNK_TYPE):
out_chunk = out_op.new_chunk(
[chunk],
shape=chunk.shape,
index=chunk.index,
dtypes=chunk.dtypes,
index_value=chunk.index_value,
columns_value=getattr(chunk, "columns"),
)
else:
out_chunk = out_op.new_chunk(
[chunk],
shape=chunk.shape,
index=chunk.index,
dtype=chunk.dtype,
index_value=chunk.index_value,
name=getattr(chunk, "name"),
)
out_chunks.append(out_chunk)
new_op = op.copy()
if isinstance(df, SERIES_TYPE):
return new_op.new_seriess(
op.inputs,
df.shape,
nsplits=tileable.nsplits,
dtype=df.dtype,
index_value=df.index_value,
name=df.name,
chunks=out_chunks,
)
else:
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tileable.nsplits,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
chunks=out_chunks,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _calc_properties(cls, x1, x2=None, axis="columns"):
if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and (
x2 is None or np.isscalar(x2)
):
# FIXME infer the dtypes of result df properly
return {
"shape": x1.shape,
"dtypes": x1.dtypes,
"columns_value": x1.columns_value,
"index_value": x1.index_value,
}
if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and (
x2 is None or np.isscalar(x2)
):
dtype = find_common_type([x1.dtype, type(x2)])
return {"shape": x1.shape, "dtype": dtype, "index_value": x1.index_value}
if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance(
x2, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)
):
index_shape, column_shape, dtypes, columns, index = (
np.nan,
np.nan,
None,
None,
None,
)
if (
x1.columns_value is not None
and x2.columns_value is not None
and x1.columns_value.key == x2.columns_value.key
):
dtypes = x1.dtypes
columns = copy.copy(x1.columns_value)
columns.value.should_be_monotonic = False
column_shape = len(dtypes)
elif x1.dtypes is not None and x2.dtypes is not None:
dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator)
columns = parse_index(dtypes.index, store_data=True)
columns.value.should_be_monotonic = True
column_shape = len(dtypes)
if x1.index_value is not None and x2.index_value is not None:
if x1.index_value.key == x2.index_value.key:
index = copy.copy(x1.index_value)
index.value.should_be_monotonic = False
index_shape = x1.shape[0]
else:
index = infer_index_value(x1.index_value, x2.index_value)
index.value.should_be_monotonic = True
if index.key == x1.index_value.key == x2.index_value.key and (
not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0])
):
index_shape = (
x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0]
)
return {
"shape": (index_shape, column_shape),
"dtypes": dtypes,
"columns_value": columns,
"index_value": index,
}
if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance(
x2, (SERIES_TYPE, SERIES_CHUNK_TYPE)
):
if axis == "columns" or axis == 1:
index_shape = x1.shape[0]
index = x1.index_value
column_shape, dtypes, columns = np.nan, None, None
if x1.columns_value is not None and x1.index_value is not None:
if x1.columns_value.key == x2.index_value.key:
dtypes = x1.dtypes
columns = copy.copy(x1.columns_value)
columns.value.should_be_monotonic = False
column_shape = len(dtypes)
else:
dtypes = x1.dtypes # FIXME
columns = infer_index_value(x1.columns_value, x2.index_value)
columns.value.should_be_monotonic = True
column_shape = np.nan
else:
assert axis == "index" or axis == 0
column_shape = x1.shape[1]
columns = x1.columns_value
dtypes = x1.dtypes
index_shape, index = np.nan, None
if x1.index_value is not None and x1.index_value is not None:
if x1.index_value.key == x2.index_value.key:
index = copy.copy(x1.columns_value)
index.value.should_be_monotonic = False
index_shape = x1.shape[0]
else:
index = infer_index_value(x1.index_value, x2.index_value)
index.value.should_be_monotonic = True
index_shape = np.nan
return {
"shape": (index_shape, column_shape),
"dtypes": dtypes,
"columns_value": columns,
"index_value": index,
}
if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and isinstance(
x2, (SERIES_TYPE, SERIES_CHUNK_TYPE)
):
index_shape, dtype, index = np.nan, None, None
dtype = find_common_type([x1.dtype, x2.dtype])
if x1.index_value is not None and x2.index_value is not None:
if x1.index_value.key == x2.index_value.key:
index = copy.copy(x1.index_value)
index.value.should_be_monotonic = False
index_shape = x1.shape[0]
else:
index = infer_index_value(x1.index_value, x2.index_value)
index.value.should_be_monotonic = True
if index.key == x1.index_value.key == x2.index_value.key and (
not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0])
):
index_shape = (
x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0]
)
return {"shape": (index_shape,), "dtype": dtype, "index_value": index}
raise NotImplementedError("Unknown combination of parameters")
|
def _calc_properties(cls, x1, x2=None, axis="columns"):
if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and (
x2 is None or np.isscalar(x2)
):
# FIXME infer the dtypes of result df properly
return {
"shape": x1.shape,
"dtypes": x1.dtypes,
"columns_value": x1.columns,
"index_value": x1.index_value,
}
if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and (
x2 is None or np.isscalar(x2)
):
dtype = find_common_type([x1.dtype, type(x2)])
return {"shape": x1.shape, "dtype": dtype, "index_value": x1.index_value}
if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance(
x2, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)
):
index_shape, column_shape, dtypes, columns, index = (
np.nan,
np.nan,
None,
None,
None,
)
if (
x1.columns is not None
and x2.columns is not None
and x1.columns.key == x2.columns.key
):
dtypes = x1.dtypes
columns = copy.copy(x1.columns)
columns.value.should_be_monotonic = True
column_shape = len(dtypes)
elif x1.dtypes is not None and x2.dtypes is not None:
dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator)
columns = parse_index(dtypes.index, store_data=True)
columns.value.should_be_monotonic = True
column_shape = len(dtypes)
if x1.index_value is not None and x2.index_value is not None:
if x1.index_value.key == x2.index_value.key:
index = copy.copy(x1.index_value)
index.value.should_be_monotonic = True
index_shape = x1.shape[0]
else:
index = infer_index_value(x1.index_value, x2.index_value)
index.value.should_be_monotonic = True
if index.key == x1.index_value.key == x2.index_value.key and (
not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0])
):
index_shape = (
x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0]
)
return {
"shape": (index_shape, column_shape),
"dtypes": dtypes,
"columns_value": columns,
"index_value": index,
}
if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance(
x2, (SERIES_TYPE, SERIES_CHUNK_TYPE)
):
if axis == "columns" or axis == 1:
index_shape = x1.shape[0]
index = x1.index_value
column_shape, dtypes, columns = np.nan, None, None
if x1.columns is not None and x1.index_value is not None:
if x1.columns.key == x2.index_value.key:
dtypes = x1.dtypes
columns = copy.copy(x1.columns)
columns.value.should_be_monotonic = True
column_shape = len(dtypes)
else:
dtypes = x1.dtypes # FIXME
columns = infer_index_value(x1.columns, x2.index_value)
columns.value.should_be_monotonic = True
column_shape = np.nan
else:
assert axis == "index" or axis == 0
column_shape = x1.shape[1]
columns = x1.columns
dtypes = x1.dtypes
index_shape, index = np.nan, None
if x1.index_value is not None and x1.index_value is not None:
if x1.columns.key == x2.index_value.key:
index = copy.copy(x1.columns)
index.value.should_be_monotonic = True
index_shape = x1.shape[0]
else:
index = infer_index_value(x1.index_value, x2.index_value)
index.value.should_be_monotonic = True
index_shape = np.nan
return {
"shape": (index_shape, column_shape),
"dtypes": dtypes,
"columns_value": columns,
"index_value": index,
}
if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and isinstance(
x2, (SERIES_TYPE, SERIES_CHUNK_TYPE)
):
index_shape, dtype, index = np.nan, None, None
dtype = find_common_type([x1.dtype, x2.dtype])
if x1.index_value is not None and x2.index_value is not None:
if x1.index_value.key == x2.index_value.key:
index = copy.copy(x1.index_value)
index.value.should_be_monotonic = True
index_shape = x1.shape[0]
else:
index = infer_index_value(x1.index_value, x2.index_value)
index.value.should_be_monotonic = True
if index.key == x1.index_value.key == x2.index_value.key and (
not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0])
):
index_shape = (
x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0]
)
return {"shape": (index_shape,), "dtype": dtype, "index_value": index}
raise NotImplementedError("Unknown combination of parameters")
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def __call__(self, x1, x2):
x1 = self._process_input(x1)
x2 = self._process_input(x2)
if isinstance(x1, SERIES_TYPE) and isinstance(x2, DATAFRAME_TYPE):
# reject invoking series's op on dataframe
raise NotImplementedError
return self._call(x1, x2)
|
def __call__(self, x1, x2):
if isinstance(x1, SERIES_TYPE) and isinstance(x2, DATAFRAME_TYPE):
# reject invokeing series's op on dataframe
raise NotImplementedError
return self._call(x1, x2)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def rcall(self, x1, x2):
x1 = self._process_input(x1)
x2 = self._process_input(x2)
if isinstance(x1, SERIES_TYPE) and isinstance(x2, DATAFRAME_TYPE):
# reject invoking series's op on dataframe
raise NotImplementedError
return self._call(x2, x1)
|
def rcall(self, x1, x2):
if isinstance(x1, SERIES_TYPE) and isinstance(x2, DATAFRAME_TYPE):
# reject invokeing series's op on dataframe
raise NotImplementedError
return self._call(x2, x1)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
out_chunks = []
for in_chunk in in_df.chunks:
out_op = op.copy().reset_key()
out_chunk = out_op.new_chunk(
[in_chunk],
shape=in_chunk.shape,
index=in_chunk.index,
index_value=in_chunk.index_value,
columns_value=in_chunk.columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=in_df.nsplits,
)
|
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
out_chunks = []
for in_chunk in in_df.chunks:
out_op = op.copy().reset_key()
out_chunk = out_op.new_chunk(
[in_chunk],
shape=in_chunk.shape,
index=in_chunk.index,
index_value=in_chunk.index_value,
columns_value=in_chunk.columns,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=in_df.nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def __call__(self, df):
return self.new_dataframe(
[df],
df.shape,
dtypes=df.dtypes,
columns_value=df.columns_value,
index_value=df.index_value,
)
|
def __call__(self, df):
return self.new_dataframe(
[df],
df.shape,
dtypes=df.dtypes,
columns_value=df.columns,
index_value=df.index_value,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def floordiv(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameFloorDiv(
axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other
)
return op(df, other)
|
def floordiv(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameFloorDiv(
axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other
)
return op(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def rfloordiv(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameFloorDiv(
axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df
)
return op.rcall(df, other)
|
def rfloordiv(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameFloorDiv(
axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df
)
return op.rcall(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def subtract(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameSubtract(
axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other
)
return op(df, other)
|
def subtract(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameSubtract(
axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other
)
return op(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def rsubtract(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameSubtract(
axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df
)
return op.rcall(df, other)
|
def rsubtract(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameSubtract(
axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df
)
return op.rcall(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def truediv(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameTrueDiv(
axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other
)
return op(df, other)
|
def truediv(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameTrueDiv(
axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other
)
return op(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def rtruediv(df, other, axis="columns", level=None, fill_value=None):
op = DataFrameTrueDiv(
axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df
)
return op.rcall(df, other)
|
def rtruediv(df, other, axis="columns", level=None, fill_value=None):
other = wrap_sequence(other)
op = DataFrameTrueDiv(
axis=axis, level=level, fill_value=fill_value, lhs=other, rhs=df
)
return op.rcall(df, other)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def __call__(self, obj):
if isinstance(obj, DATAFRAME_TYPE):
self._object_type = ObjectType.dataframe
return self.new_dataframe(
[obj],
shape=obj.shape,
dtypes=obj.dtypes,
index_value=obj.index_value,
columns_value=obj.columns_value,
)
else:
assert isinstance(obj, SERIES_TYPE)
self._object_type = ObjectType.series
return self.new_series(
[obj],
shape=obj.shape,
dtype=obj.dtype,
index_value=obj.index_value,
name=obj.name,
)
|
def __call__(self, obj):
if isinstance(obj, DATAFRAME_TYPE):
self._object_type = ObjectType.dataframe
return self.new_dataframe(
[obj],
shape=obj.shape,
dtypes=obj.dtypes,
index_value=obj.index_value,
columns_value=obj.columns,
)
else:
assert isinstance(obj, SERIES_TYPE)
self._object_type = ObjectType.series
return self.new_series(
[obj],
shape=obj.shape,
dtype=obj.dtype,
index_value=obj.index_value,
name=obj.name,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def params(self):
# params return the properties which useful to rebuild a new chunk
return {
"shape": self.shape,
"dtypes": self.dtypes,
"index": self.index,
"index_value": self.index_value,
"columns_value": self.columns_value,
}
|
def params(self):
# params return the properties which useful to rebuild a new chunk
return {
"shape": self.shape,
"dtypes": self.dtypes,
"index": self.index,
"index_value": self.index_value,
"columns_value": self.columns,
}
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def params(self):
# params return the properties which useful to rebuild a new tileable object
return {
"shape": self.shape,
"dtypes": self.dtypes,
"index_value": self.index_value,
"columns_value": self.columns_value,
}
|
def params(self):
# params return the properties which useful to rebuild a new tileable object
return {
"shape": self.shape,
"dtypes": self.dtypes,
"index_value": self.index_value,
"columns_value": self.columns,
}
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
df = op.outputs[0]
raw_df = op.data
memory_usage = raw_df.memory_usage(index=False, deep=True)
chunk_size = df.extra_params.raw_chunk_size or options.tensor.chunk_size
chunk_size = decide_dataframe_chunk_sizes(df.shape, chunk_size, memory_usage)
chunk_size_idxes = (range(len(size)) for size in chunk_size)
out_chunks = []
for chunk_shape, chunk_idx in izip(
itertools.product(*chunk_size), itertools.product(*chunk_size_idxes)
):
chunk_op = op.copy().reset_key()
slc = get_chunk_slices(chunk_size, chunk_idx)
chunk_op._data = raw_df.iloc[slc]
chunk_op._dtypes = chunk_op._data.dtypes
out_chunk = chunk_op.new_chunk(
None,
shape=chunk_shape,
index=chunk_idx,
index_value=parse_index(chunk_op.data.index),
columns_value=parse_index(chunk_op.data.columns, store_data=True),
dtypes=chunk_op._data.dtypes,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
None,
df.shape,
dtypes=op.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
chunks=out_chunks,
nsplits=chunk_size,
)
|
def tile(cls, op):
df = op.outputs[0]
raw_df = op.data
memory_usage = raw_df.memory_usage(index=False, deep=True)
chunk_size = df.extra_params.raw_chunk_size or options.tensor.chunk_size
chunk_size = decide_dataframe_chunk_sizes(df.shape, chunk_size, memory_usage)
chunk_size_idxes = (range(len(size)) for size in chunk_size)
out_chunks = []
for chunk_shape, chunk_idx in izip(
itertools.product(*chunk_size), itertools.product(*chunk_size_idxes)
):
chunk_op = op.copy().reset_key()
slc = get_chunk_slices(chunk_size, chunk_idx)
chunk_op._data = raw_df.iloc[slc]
chunk_op._dtypes = chunk_op._data.dtypes
out_chunk = chunk_op.new_chunk(
None,
shape=chunk_shape,
index=chunk_idx,
index_value=parse_index(chunk_op.data.index),
columns_value=parse_index(chunk_op.data.columns, store_data=True),
dtypes=chunk_op._data.dtypes,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
None,
df.shape,
dtypes=op.dtypes,
index_value=df.index_value,
columns_value=df.columns,
chunks=out_chunks,
nsplits=chunk_size,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
df = op.outputs[0]
tensor = op.inputs[0]
nsplit_acc = np.cumsum(tensor.nsplits[0])
out_chunks = []
for chunk in tensor.chunks:
begin_index = nsplit_acc[chunk.index[0]] - chunk.shape[0]
end_index = nsplit_acc[chunk.index[0]]
chunk_index_value = parse_index(
pd.RangeIndex(start=begin_index, stop=end_index)
)
# Here the `new_chunk` is tricky:
#
# We can construct tensor that have identifcal chunks, for example, from `mt.ones(...)`, we know
# that after tiling the chunk of the same shape (but at different position) in `mt.ones` is indeed
# the same chunk (has the same key)!
#
# Thus, when we construct dataframe from such tensor, we will have dataframe chunks that only differ
# in `index_value`. However the `index_value` field won't be used to calculate the chunk key of
# the dataframe chunk, thus `new_chunk` generated the same keys for those indeed different chunks
# (they have different `index_values`).
#
# Here, we construct new chunk with some unique `_extra_params` to make the `new_chunk` work as
# expected.
chunk_op = op.copy().reset_key()
chunk_op.extra_params["begin_index"] = begin_index
chunk_op.extra_params["end_index"] = end_index
out_chunk = chunk_op.new_chunk(
[chunk],
shape=(chunk.shape[0], df.shape[1]),
index=(chunk.index[0], 0),
dtypes=df.dtypes,
index_value=chunk_index_value,
columns_value=df.columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
[tensor],
df.shape,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
chunks=out_chunks,
nsplits=[tensor.nsplits[0], [df.shape[1]]],
)
|
def tile(cls, op):
df = op.outputs[0]
tensor = op.inputs[0]
nsplit_acc = np.cumsum(tensor.nsplits[0])
out_chunks = []
for chunk in tensor.chunks:
begin_index = nsplit_acc[chunk.index[0]] - chunk.shape[0]
end_index = nsplit_acc[chunk.index[0]]
chunk_index_value = parse_index(
pd.RangeIndex(start=begin_index, stop=end_index)
)
# Here the `new_chunk` is tricky:
#
# We can construct tensor that have identifcal chunks, for example, from `mt.ones(...)`, we know
# that after tiling the chunk of the same shape (but at different position) in `mt.ones` is indeed
# the same chunk (has the same key)!
#
# Thus, when we construct dataframe from such tensor, we will have dataframe chunks that only differ
# in `index_value`. However the `index_value` field won't be used to calculate the chunk key of
# the dataframe chunk, thus `new_chunk` generated the same keys for those indeed different chunks
# (they have different `index_values`).
#
# Here, we construct new chunk with some unique `_extra_params` to make the `new_chunk` work as
# expected.
chunk_op = op.copy().reset_key()
chunk_op.extra_params["begin_index"] = begin_index
chunk_op.extra_params["end_index"] = end_index
out_chunk = chunk_op.new_chunk(
[chunk],
shape=(chunk.shape[0], df.shape[1]),
index=(chunk.index[0], 0),
dtypes=df.dtypes,
index_value=chunk_index_value,
columns_value=df.columns,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
[tensor],
df.shape,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
chunks=out_chunks,
nsplits=[tensor.nsplits[0], [df.shape[1]]],
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def execute(cls, ctx, op):
chunk = op.outputs[0]
ctx[chunk.key] = pd.DataFrame.from_records(
ctx[op.inputs[0].key],
index=chunk.index_value.to_pandas(),
columns=chunk.columns_value.to_pandas(),
exclude=op.exclude,
coerce_float=op.coerce_float,
nrows=op.nrows,
)
|
def execute(cls, ctx, op):
chunk = op.outputs[0]
ctx[chunk.key] = pd.DataFrame.from_records(
ctx[op.inputs[0].key],
index=chunk.index_value.to_pandas(),
columns=chunk.columns.to_pandas(),
exclude=op.exclude,
coerce_float=op.coerce_float,
nrows=op.nrows,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
out_df = op.outputs[0]
in_tensor = op.input
out_chunks = []
nsplits = in_tensor.nsplits
if any(any(np.isnan(ns)) for ns in nsplits):
raise NotImplementedError("NAN shape is not supported in DataFrame")
cum_size = [np.cumsum(s) for s in nsplits]
for in_chunk in in_tensor.chunks:
out_op = op.copy().reset_key()
if in_chunk.ndim == 1:
(i,) = in_chunk.index
column_stop = 1
index = (in_chunk.index[0], 0)
columns_value = parse_index(
out_df.columns_value.to_pandas()[0:1], store_data=True
)
else:
i, j = in_chunk.index
column_stop = cum_size[1][j]
index = in_chunk.index
columns_value = parse_index(
out_df.columns_value.to_pandas()[
column_stop - in_chunk.shape[1] : column_stop
],
store_data=True,
)
index_stop = cum_size[0][i]
if out_df.index_value.has_value():
index_value = parse_index(
out_df.index_value.to_pandas()[
index_stop - in_chunk.shape[0] : index_stop
],
store_data=True,
)
else:
index_value = parse_index(
pd.RangeIndex(start=index_stop - in_chunk.shape[0], stop=index_stop)
)
out_op.extra_params["index_stop"] = index_stop
out_op.extra_params["column_stop"] = column_stop
out_chunk = out_op.new_chunk(
[in_chunk],
shape=in_chunk.shape,
index=index,
index_value=index_value,
columns_value=columns_value,
)
out_chunks.append(out_chunk)
if in_tensor.ndim == 1:
nsplits = in_tensor.nsplits + ((1,),)
else:
nsplits = in_tensor.nsplits
new_op = op.copy()
return new_op.new_dataframes(
out_df.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=nsplits,
)
|
def tile(cls, op):
out_df = op.outputs[0]
in_tensor = op.input
out_chunks = []
nsplits = in_tensor.nsplits
if any(any(np.isnan(ns)) for ns in nsplits):
raise NotImplementedError("NAN shape is not supported in DataFrame")
cum_size = [np.cumsum(s) for s in nsplits]
for in_chunk in in_tensor.chunks:
out_op = op.copy().reset_key()
if in_chunk.ndim == 1:
(i,) = in_chunk.index
column_stop = 1
index = (in_chunk.index[0], 0)
columns_value = parse_index(
out_df.columns.to_pandas()[0:1], store_data=True
)
else:
i, j = in_chunk.index
column_stop = cum_size[1][j]
index = in_chunk.index
columns_value = parse_index(
out_df.columns.to_pandas()[
column_stop - in_chunk.shape[1] : column_stop
],
store_data=True,
)
index_stop = cum_size[0][i]
if out_df.index_value.has_value():
index_value = parse_index(
out_df.index_value.to_pandas()[
index_stop - in_chunk.shape[0] : index_stop
],
store_data=True,
)
else:
index_value = parse_index(
pd.RangeIndex(start=index_stop - in_chunk.shape[0], stop=index_stop)
)
out_op.extra_params["index_stop"] = index_stop
out_op.extra_params["column_stop"] = column_stop
out_chunk = out_op.new_chunk(
[in_chunk],
shape=in_chunk.shape,
index=index,
index_value=index_value,
columns_value=columns_value,
)
out_chunks.append(out_chunk)
if in_tensor.ndim == 1:
nsplits = in_tensor.nsplits + ((1,),)
else:
nsplits = in_tensor.nsplits
new_op = op.copy()
return new_op.new_dataframes(
out_df.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def execute(cls, ctx, op):
chunk = op.outputs[0]
tensor_data = ctx[op.inputs[0].key]
ctx[chunk.key] = pd.DataFrame(
tensor_data,
index=chunk.index_value.to_pandas(),
columns=chunk.columns_value.to_pandas(),
)
|
def execute(cls, ctx, op):
chunk = op.outputs[0]
tensor_data = ctx[op.inputs[0].key]
ctx[chunk.key] = pd.DataFrame(
tensor_data,
index=chunk.index_value.to_pandas(),
columns=chunk.columns.to_pandas(),
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
in_df = build_concated_rows_frame(op.inputs[0])
out_df = op.outputs[0]
# First, perform groupby and aggregation on each chunk.
agg_chunks = []
for chunk in in_df.chunks:
agg_op = op.copy().reset_key()
agg_op._stage = Stage.agg
agg_chunk = agg_op.new_chunk(
[chunk],
shape=out_df.shape,
index=chunk.index,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
)
agg_chunks.append(agg_chunk)
# Shuffle the aggregation chunk.
reduce_chunks = cls._gen_shuffle_chunks(op, in_df, agg_chunks)
# Combine groups
combine_chunks = []
for chunk in reduce_chunks:
combine_op = op.copy().reset_key()
combine_op._stage = Stage.combine
combine_chunk = combine_op.new_chunk(
[chunk],
shape=out_df.shape,
index=chunk.index,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
)
combine_chunks.append(combine_chunk)
new_op = op.copy()
return new_op.new_dataframes(
[in_df],
shape=out_df.shape,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=combine_chunks,
nsplits=((np.nan,) * len(combine_chunks), (out_df.shape[1],)),
)
|
def tile(cls, op):
in_df = build_concated_rows_frame(op.inputs[0])
out_df = op.outputs[0]
# First, perform groupby and aggregation on each chunk.
agg_chunks = []
for chunk in in_df.chunks:
agg_op = op.copy().reset_key()
agg_op._stage = Stage.agg
agg_chunk = agg_op.new_chunk(
[chunk],
shape=out_df.shape,
index=chunk.index,
index_value=out_df.index_value,
columns_value=out_df.columns,
)
agg_chunks.append(agg_chunk)
# Shuffle the aggregation chunk.
reduce_chunks = cls._gen_shuffle_chunks(op, in_df, agg_chunks)
# Combine groups
combine_chunks = []
for chunk in reduce_chunks:
combine_op = op.copy().reset_key()
combine_op._stage = Stage.combine
combine_chunk = combine_op.new_chunk(
[chunk],
shape=out_df.shape,
index=chunk.index,
index_value=out_df.index_value,
columns_value=out_df.columns,
)
combine_chunks.append(combine_chunk)
new_op = op.copy()
return new_op.new_dataframes(
[in_df],
shape=out_df.shape,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=combine_chunks,
nsplits=((np.nan,) * len(combine_chunks), (out_df.shape[1],)),
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def __call__(self, df):
if self.col_names is not None:
# if col_names is a list, return a DataFrame, else return a Series
if isinstance(self._col_names, list):
dtypes = df.dtypes[self._col_names]
columns = parse_index(pd.Index(self._col_names), store_data=True)
return self.new_dataframe(
[df],
shape=(df.shape[0], len(self._col_names)),
dtypes=dtypes,
index_value=df.index_value,
columns_value=columns,
)
else:
dtype = df.dtypes[self._col_names]
return self.new_series(
[df],
shape=(df.shape[0],),
dtype=dtype,
index_value=df.index_value,
name=self._col_names,
)
else:
if isinstance(self.mask, SERIES_TYPE):
index_value = parse_index(
pd.Index([], dtype=df.index_value.to_pandas().dtype),
key=tokenize(
df.key, self.mask.key, df.index_value.key, self.mask.index_value.key
),
)
return self.new_dataframe(
[df, self._mask],
shape=(np.nan, df.shape[1]),
dtypes=df.dtypes,
index_value=index_value,
columns_value=df.columns_value,
)
else:
index_value = parse_index(
pd.Index([], dtype=df.index_value.to_pandas().dtype),
key=tokenize(
df.key,
pd.util.hash_pandas_object(self.mask),
df.index_value.key,
parse_index(self.mask.index).key,
),
)
return self.new_dataframe(
[df],
shape=(np.nan, df.shape[1]),
dtypes=df.dtypes,
index_value=index_value,
columns_value=df.columns_value,
)
|
def __call__(self, df):
if self.col_names is not None:
# if col_names is a list, return a DataFrame, else return a Series
if isinstance(self._col_names, list):
dtypes = df.dtypes[self._col_names]
columns = parse_index(pd.Index(self._col_names), store_data=True)
return self.new_dataframe(
[df],
shape=(df.shape[0], len(self._col_names)),
dtypes=dtypes,
index_value=df.index_value,
columns_value=columns,
)
else:
dtype = df.dtypes[self._col_names]
return self.new_series(
[df],
shape=(df.shape[0],),
dtype=dtype,
index_value=df.index_value,
name=self._col_names,
)
else:
if isinstance(self.mask, SERIES_TYPE):
index_value = parse_index(
pd.Index([], dtype=df.index_value.to_pandas().dtype),
key=tokenize(
df.key, self.mask.key, df.index_value.key, self.mask.index_value.key
),
)
return self.new_dataframe(
[df, self._mask],
shape=(np.nan, df.shape[1]),
dtypes=df.dtypes,
index_value=index_value,
columns_value=df.columns,
)
else:
index_value = parse_index(
pd.Index([], dtype=df.index_value.to_pandas().dtype),
key=tokenize(
df.key,
pd.util.hash_pandas_object(self.mask),
df.index_value.key,
parse_index(self.mask.index).key,
),
)
return self.new_dataframe(
[df],
shape=(np.nan, df.shape[1]),
dtypes=df.dtypes,
index_value=index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile_with_mask(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
out_chunks = []
if isinstance(op.mask, SERIES_TYPE):
mask = op.inputs[1]
nsplits, out_shape, df_chunks, mask_chunks = align_dataframe_series(
in_df, mask, axis="index"
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for idx, df_chunk in zip(out_chunk_indexes, df_chunks):
mask_chunk = mask_chunks[df_chunk.index[0]]
out_chunk = (
op.copy()
.reset_key()
.new_chunk(
[df_chunk, mask_chunk],
shape=(np.nan, df_chunk.shape[1]),
index=idx,
index_value=df_chunk.index_value,
columns_value=df_chunk.columns_value,
)
)
out_chunks.append(out_chunk)
else:
nsplits_acc = np.cumsum((0,) + in_df.nsplits[0])
for idx in range(in_df.chunk_shape[0]):
for idxj in range(in_df.chunk_shape[1]):
in_chunk = in_df.cix[idx, idxj]
chunk_op = op.copy().reset_key()
chunk_op._mask = op.mask.iloc[nsplits_acc[idx] : nsplits_acc[idx + 1]]
out_chunk = chunk_op.new_chunk(
[in_chunk],
index=in_chunk.index,
shape=(np.nan, in_chunk.shape[1]),
dtypes=in_chunk.dtypes,
index_value=in_df.index_value,
columns_value=in_chunk.columns_value,
)
out_chunks.append(out_chunk)
nsplits = ((np.nan,) * in_df.chunk_shape[0], in_df.nsplits[1])
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=nsplits,
)
|
def tile_with_mask(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
out_chunks = []
if isinstance(op.mask, SERIES_TYPE):
mask = op.inputs[1]
nsplits, out_shape, df_chunks, mask_chunks = align_dataframe_series(
in_df, mask, axis="index"
)
out_chunk_indexes = itertools.product(*(range(s) for s in out_shape))
out_chunks = []
for idx, df_chunk in zip(out_chunk_indexes, df_chunks):
mask_chunk = mask_chunks[df_chunk.index[0]]
out_chunk = (
op.copy()
.reset_key()
.new_chunk(
[df_chunk, mask_chunk],
shape=(np.nan, df_chunk.shape[1]),
index=idx,
index_value=df_chunk.index_value,
columns_value=df_chunk.columns,
)
)
out_chunks.append(out_chunk)
else:
nsplits_acc = np.cumsum((0,) + in_df.nsplits[0])
for idx in range(in_df.chunk_shape[0]):
for idxj in range(in_df.chunk_shape[1]):
in_chunk = in_df.cix[idx, idxj]
chunk_op = op.copy().reset_key()
chunk_op._mask = op.mask.iloc[nsplits_acc[idx] : nsplits_acc[idx + 1]]
out_chunk = chunk_op.new_chunk(
[in_chunk],
index=in_chunk.index,
shape=(np.nan, in_chunk.shape[1]),
dtypes=in_chunk.dtypes,
index_value=in_df.index_value,
columns_value=in_chunk.columns,
)
out_chunks.append(out_chunk)
nsplits = ((np.nan,) * in_df.chunk_shape[0], in_df.nsplits[1])
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile_with_columns(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
col_names = op.col_names
if not isinstance(col_names, list):
column_index = calc_columns_index(col_names, in_df)
out_chunks = []
dtype = in_df.dtypes[col_names]
for i in range(in_df.chunk_shape[0]):
c = in_df.cix[(i, column_index)]
op = DataFrameIndex(col_names=col_names)
out_chunks.append(
op.new_chunk(
[c],
shape=(c.shape[0],),
index=(i,),
dtype=dtype,
index_value=c.index_value,
name=col_names,
)
)
new_op = op.copy()
return new_op.new_seriess(
op.inputs,
shape=out_df.shape,
dtype=out_df.dtype,
index_value=out_df.index_value,
name=out_df.name,
nsplits=(in_df.nsplits[0],),
chunks=out_chunks,
)
else:
# combine columns into one chunk and keep the columns order at the same time.
# When chunk columns are ['c1', 'c2', 'c3'], ['c4', 'c5'],
# selected columns are ['c2', 'c3', 'c4', 'c2'], `column_splits` will be
# [(['c2', 'c3'], 0), ('c4', 1), ('c2', 0)].
selected_index = [calc_columns_index(col, in_df) for col in col_names]
condition = np.where(np.diff(selected_index))[0] + 1
column_splits = np.split(col_names, condition)
column_indexes = np.split(selected_index, condition)
out_chunks = [[] for _ in range(in_df.chunk_shape[0])]
column_nsplits = []
for i, (columns, column_idx) in enumerate(zip(column_splits, column_indexes)):
dtypes = in_df.dtypes[columns]
column_nsplits.append(len(columns))
for j in range(in_df.chunk_shape[0]):
c = in_df.cix[(j, column_idx[0])]
index_op = DataFrameIndex(
col_names=list(columns), object_type=ObjectType.dataframe
)
out_chunk = index_op.new_chunk(
[c],
shape=(c.shape[0], len(columns)),
index=(j, i),
dtypes=dtypes,
index_value=c.index_value,
columns_value=parse_index(pd.Index(columns), store_data=True),
)
out_chunks[j].append(out_chunk)
out_chunks = [item for l in out_chunks for item in l]
new_op = op.copy()
nsplits = (in_df.nsplits[0], tuple(column_nsplits))
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=nsplits,
)
|
def tile_with_columns(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
col_names = op.col_names
if not isinstance(col_names, list):
column_index = calc_columns_index(col_names, in_df)
out_chunks = []
dtype = in_df.dtypes[col_names]
for i in range(in_df.chunk_shape[0]):
c = in_df.cix[(i, column_index)]
op = DataFrameIndex(col_names=col_names)
out_chunks.append(
op.new_chunk(
[c],
shape=(c.shape[0],),
index=(i,),
dtype=dtype,
index_value=c.index_value,
name=col_names,
)
)
new_op = op.copy()
return new_op.new_seriess(
op.inputs,
shape=out_df.shape,
dtype=out_df.dtype,
index_value=out_df.index_value,
name=out_df.name,
nsplits=(in_df.nsplits[0],),
chunks=out_chunks,
)
else:
# combine columns into one chunk and keep the columns order at the same time.
# When chunk columns are ['c1', 'c2', 'c3'], ['c4', 'c5'],
# selected columns are ['c2', 'c3', 'c4', 'c2'], `column_splits` will be
# [(['c2', 'c3'], 0), ('c4', 1), ('c2', 0)].
selected_index = [calc_columns_index(col, in_df) for col in col_names]
condition = np.where(np.diff(selected_index))[0] + 1
column_splits = np.split(col_names, condition)
column_indexes = np.split(selected_index, condition)
out_chunks = [[] for _ in range(in_df.chunk_shape[0])]
column_nsplits = []
for i, (columns, column_idx) in enumerate(zip(column_splits, column_indexes)):
dtypes = in_df.dtypes[columns]
column_nsplits.append(len(columns))
for j in range(in_df.chunk_shape[0]):
c = in_df.cix[(j, column_idx[0])]
index_op = DataFrameIndex(
col_names=list(columns), object_type=ObjectType.dataframe
)
out_chunk = index_op.new_chunk(
[c],
shape=(c.shape[0], len(columns)),
index=(j, i),
dtypes=dtypes,
index_value=c.index_value,
columns_value=parse_index(pd.Index(columns), store_data=True),
)
out_chunks[j].append(out_chunk)
out_chunks = [item for l in out_chunks for item in l]
new_op = op.copy()
nsplits = (in_df.nsplits[0], tuple(column_nsplits))
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def dataframe_getitem(df, item):
columns = df.columns_value.to_pandas()
if isinstance(item, list):
for col_name in item:
if col_name not in columns:
raise KeyError("%s not in columns" % col_name)
op = DataFrameIndex(col_names=item, object_type=ObjectType.dataframe)
elif isinstance(item, SERIES_TYPE) and item.dtype == np.bool:
op = DataFrameIndex(mask=item, object_type=ObjectType.dataframe)
elif isinstance(item, pd.Series) and item.dtype == np.bool:
op = DataFrameIndex(mask=item, object_type=ObjectType.dataframe)
else:
if item not in columns:
raise KeyError("%s not in columns" % item)
op = DataFrameIndex(col_names=item)
return op(df)
|
def dataframe_getitem(df, item):
columns = df.columns.to_pandas()
if isinstance(item, list):
for col_name in item:
if col_name not in columns:
raise KeyError("%s not in columns" % col_name)
op = DataFrameIndex(col_names=item, object_type=ObjectType.dataframe)
elif isinstance(item, SERIES_TYPE) and item.dtype == np.bool:
op = DataFrameIndex(mask=item, object_type=ObjectType.dataframe)
elif isinstance(item, pd.Series) and item.dtype == np.bool:
op = DataFrameIndex(mask=item, object_type=ObjectType.dataframe)
else:
if item not in columns:
raise KeyError("%s not in columns" % item)
op = DataFrameIndex(col_names=item)
return op(df)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def __call__(self, df):
# Note [Fancy Index of Numpy and Pandas]
#
# The numpy and pandas.iloc have different semantic when processing fancy index:
#
# >>> np.ones((3,3))[[1,2],[1,2]]
# array([1., 1.])
#
# >>> pd.DataFrame(np.ones((3,3))).iloc[[1,2],[1,2]]
# 1 2
# 1 1.0 1.0
# 2 1.0 1.0
#
# Thus, we processing the index along two axis of DataFrame seperately.
if isinstance(self.indexes[0], TENSOR_TYPE) or isinstance(
self.indexes[1], TENSOR_TYPE
):
raise NotImplementedError("The index value cannot be unexecuted mars tensor")
shape0 = tuple(calc_shape((df.shape[0],), (self.indexes[0],)))
shape1 = tuple(calc_shape((df.shape[1],), (self.indexes[1],)))
# NB: pandas only compresses the result to series when index on one of axis is integral
if isinstance(self.indexes[1], Integral):
shape = shape0
dtype = df.dtypes.iloc[self.indexes[1]]
index_value = indexing_index_value(df.index_value, self.indexes[0])
self._object_type = ObjectType.series
return self.new_series([df], shape=shape, dtype=dtype, index_value=index_value)
elif isinstance(self.indexes[0], Integral):
shape = shape1
dtype = find_common_type(df.dtypes.iloc[self.indexes[1]].values)
index_value = indexing_index_value(df.columns_value, self.indexes[1])
self._object_type = ObjectType.series
return self.new_series([df], shape=shape, dtype=dtype, index_value=index_value)
else:
return self.new_dataframe(
[df],
shape=shape0 + shape1,
dtypes=df.dtypes.iloc[self.indexes[1]],
index_value=indexing_index_value(df.index_value, self.indexes[0]),
columns_value=indexing_index_value(
df.columns_value, self.indexes[1], store_data=True
),
)
|
def __call__(self, df):
# Note [Fancy Index of Numpy and Pandas]
#
# The numpy and pandas.iloc have different semantic when processing fancy index:
#
# >>> np.ones((3,3))[[1,2],[1,2]]
# array([1., 1.])
#
# >>> pd.DataFrame(np.ones((3,3))).iloc[[1,2],[1,2]]
# 1 2
# 1 1.0 1.0
# 2 1.0 1.0
#
# Thus, we processing the index along two axis of DataFrame seperately.
if isinstance(self.indexes[0], TENSOR_TYPE) or isinstance(
self.indexes[1], TENSOR_TYPE
):
raise NotImplementedError("The index value cannot be unexecuted mars tensor")
shape0 = tuple(calc_shape((df.shape[0],), (self.indexes[0],)))
shape1 = tuple(calc_shape((df.shape[1],), (self.indexes[1],)))
# NB: pandas only compresses the result to series when index on one of axis is integral
if isinstance(self.indexes[1], Integral):
shape = shape0
dtype = df.dtypes.iloc[self.indexes[1]]
index_value = indexing_index_value(df.index_value, self.indexes[0])
self._object_type = ObjectType.series
return self.new_series([df], shape=shape, dtype=dtype, index_value=index_value)
elif isinstance(self.indexes[0], Integral):
shape = shape1
dtype = find_common_type(df.dtypes.iloc[self.indexes[1]].values)
index_value = indexing_index_value(df.columns, self.indexes[1])
self._object_type = ObjectType.series
return self.new_series([df], shape=shape, dtype=dtype, index_value=index_value)
else:
return self.new_dataframe(
[df],
shape=shape0 + shape1,
dtypes=df.dtypes.iloc[self.indexes[1]],
index_value=indexing_index_value(df.index_value, self.indexes[0]),
columns_value=indexing_index_value(
df.columns, self.indexes[1], store_data=True
),
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
in_df = op.inputs[0]
out_val = op.outputs[0]
# See Note [Fancy Index of Numpy and Pandas]
tensor0 = empty(in_df.shape[0], chunk_size=(in_df.nsplits[0],))[
op.indexes[0]
].tiles()
tensor1 = empty(in_df.shape[1], chunk_size=(in_df.nsplits[1],))[
op.indexes[1]
].tiles()
integral_index_on_index = isinstance(op.indexes[0], Integral)
integral_index_on_column = isinstance(op.indexes[1], Integral)
out_chunks = []
for index_chunk, column_chunk in itertools.product(tensor0.chunks, tensor1.chunks):
in_chunk = in_df.cix[index_chunk.inputs[0].index + column_chunk.inputs[0].index]
chunk_op = op.copy().reset_key()
chunk_op._indexes = (index_chunk.op.indexes[0], column_chunk.op.indexes[0])
if integral_index_on_column:
shape = index_chunk.shape
index = index_chunk.index
index_value = indexing_index_value(
in_chunk.index_value, index_chunk.op.indexes[0]
)
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=shape,
index=index,
dtype=out_val.dtype,
index_value=index_value,
)
elif integral_index_on_index:
shape = column_chunk.shape
index = column_chunk.index
index_value = indexing_index_value(
in_chunk.columns_value, column_chunk.op.indexes[0]
)
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=shape,
index=index,
dtype=out_val.dtype,
index_value=index_value,
)
else:
index_value = indexing_index_value(
in_chunk.index_value, index_chunk.op.indexes[0]
)
columns_value = indexing_index_value(
in_chunk.columns_value, column_chunk.op.indexes[0], store_data=True
)
dtypes = in_chunk.dtypes.iloc[column_chunk.op.indexes[0]]
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=index_chunk.shape + column_chunk.shape,
index=index_chunk.index + column_chunk.index,
dtypes=dtypes,
index_value=index_value,
columns_value=columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
if integral_index_on_column or integral_index_on_index:
if integral_index_on_column:
nsplits = tensor0.nsplits
else:
nsplits = tensor1.nsplits
return new_op.new_seriess(
op.inputs,
out_val.shape,
dtype=out_val.dtype,
index_value=out_val.index_value,
chunks=out_chunks,
nsplits=nsplits,
)
else:
nsplits = tensor0.nsplits + tensor1.nsplits
return new_op.new_dataframes(
op.inputs,
out_val.shape,
dtypes=out_val.dtypes,
index_value=out_val.index_value,
columns_value=out_val.columns_value,
chunks=out_chunks,
nsplits=nsplits,
)
|
def tile(cls, op):
in_df = op.inputs[0]
out_val = op.outputs[0]
# See Note [Fancy Index of Numpy and Pandas]
tensor0 = empty(in_df.shape[0], chunk_size=(in_df.nsplits[0],))[
op.indexes[0]
].tiles()
tensor1 = empty(in_df.shape[1], chunk_size=(in_df.nsplits[1],))[
op.indexes[1]
].tiles()
integral_index_on_index = isinstance(op.indexes[0], Integral)
integral_index_on_column = isinstance(op.indexes[1], Integral)
out_chunks = []
for index_chunk, column_chunk in itertools.product(tensor0.chunks, tensor1.chunks):
in_chunk = in_df.cix[index_chunk.inputs[0].index + column_chunk.inputs[0].index]
chunk_op = op.copy().reset_key()
chunk_op._indexes = (index_chunk.op.indexes[0], column_chunk.op.indexes[0])
if integral_index_on_column:
shape = index_chunk.shape
index = index_chunk.index
index_value = indexing_index_value(
in_chunk.index_value, index_chunk.op.indexes[0]
)
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=shape,
index=index,
dtype=out_val.dtype,
index_value=index_value,
)
elif integral_index_on_index:
shape = column_chunk.shape
index = column_chunk.index
index_value = indexing_index_value(
in_chunk.columns, column_chunk.op.indexes[0]
)
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=shape,
index=index,
dtype=out_val.dtype,
index_value=index_value,
)
else:
index_value = indexing_index_value(
in_chunk.index_value, index_chunk.op.indexes[0]
)
columns_value = indexing_index_value(
in_chunk.columns, column_chunk.op.indexes[0], store_data=True
)
dtypes = in_chunk.dtypes.iloc[column_chunk.op.indexes[0]]
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=index_chunk.shape + column_chunk.shape,
index=index_chunk.index + column_chunk.index,
dtypes=dtypes,
index_value=index_value,
columns_value=columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
if integral_index_on_column or integral_index_on_index:
if integral_index_on_column:
nsplits = tensor0.nsplits
else:
nsplits = tensor1.nsplits
return new_op.new_seriess(
op.inputs,
out_val.shape,
dtype=out_val.dtype,
index_value=out_val.index_value,
chunks=out_chunks,
nsplits=nsplits,
)
else:
nsplits = tensor0.nsplits + tensor1.nsplits
return new_op.new_dataframes(
op.inputs,
out_val.shape,
dtypes=out_val.dtypes,
index_value=out_val.index_value,
columns_value=out_val.columns,
chunks=out_chunks,
nsplits=nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def __call__(self, df):
if isinstance(self.indexes[0], TENSOR_TYPE) or isinstance(
self.indexes[1], TENSOR_TYPE
):
raise NotImplementedError("The index value cannot be unexecuted mars tensor")
return self.new_dataframe(
[df],
shape=df.shape,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def __call__(self, df):
if isinstance(self.indexes[0], TENSOR_TYPE) or isinstance(
self.indexes[1], TENSOR_TYPE
):
raise NotImplementedError("The index value cannot be unexecuted mars tensor")
return self.new_dataframe(
[df],
shape=df.shape,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
# See Note [Fancy Index of Numpy and Pandas]
tensor0 = empty(in_df.shape[0], chunk_size=(in_df.nsplits[0],))[
op.indexes[0]
].tiles()
tensor1 = empty(in_df.shape[1], chunk_size=(in_df.nsplits[1],))[
op.indexes[1]
].tiles()
chunk_mapping = {
c0.inputs[0].index + c1.inputs[0].index: (c0, c1)
for c0, c1 in itertools.product(tensor0.chunks, tensor1.chunks)
}
out_chunks = []
for chunk in in_df.chunks:
if chunk.index not in chunk_mapping:
out_chunks.append(chunk)
else:
chunk_op = op.copy().reset_key()
index_chunk, column_chunk = chunk_mapping[chunk.index]
chunk_op._indexes = (index_chunk.op.indexes[0], column_chunk.op.indexes[0])
chunk_op._value = op.value
out_chunk = chunk_op.new_chunk(
[chunk],
shape=chunk.shape,
index=chunk.index,
dtypes=chunk.dtypes,
index_value=chunk.index_value,
columns_value=chunk.columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=in_df.nsplits,
)
|
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
# See Note [Fancy Index of Numpy and Pandas]
tensor0 = empty(in_df.shape[0], chunk_size=(in_df.nsplits[0],))[
op.indexes[0]
].tiles()
tensor1 = empty(in_df.shape[1], chunk_size=(in_df.nsplits[1],))[
op.indexes[1]
].tiles()
chunk_mapping = {
c0.inputs[0].index + c1.inputs[0].index: (c0, c1)
for c0, c1 in itertools.product(tensor0.chunks, tensor1.chunks)
}
out_chunks = []
for chunk in in_df.chunks:
if chunk.index not in chunk_mapping:
out_chunks.append(chunk)
else:
chunk_op = op.copy().reset_key()
index_chunk, column_chunk = chunk_mapping[chunk.index]
chunk_op._indexes = (index_chunk.op.indexes[0], column_chunk.op.indexes[0])
chunk_op._value = op.value
out_chunk = chunk_op.new_chunk(
[chunk],
shape=chunk.shape,
index=chunk.index,
dtypes=chunk.dtypes,
index_value=chunk.index_value,
columns_value=chunk.columns,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
shape=out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=in_df.nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
if not isinstance(op.keys, six.string_types):
raise NotImplementedError("DataFrame.set_index only support label")
if op.verify_integrity:
raise NotImplementedError(
"DataFrame.set_index not support verify_integrity yet"
)
out_chunks = []
try:
column_index = in_df.columns_value.to_pandas().get_loc(op.keys)
except KeyError:
raise NotImplementedError(
"The new index label must be a column of the original dataframe"
)
chunk_index = np.searchsorted(np.cumsum(in_df.nsplits[1]), column_index + 1)
for row_idx in range(in_df.chunk_shape[0]):
index_chunk = in_df.cix[row_idx, chunk_index]
for col_idx in range(in_df.chunk_shape[1]):
input_chunk = in_df.cix[row_idx, col_idx]
if op.drop and input_chunk.key == index_chunk.key:
new_shape = (input_chunk.shape[0], input_chunk.shape[1] - 1)
columns = parse_index(
input_chunk.columns_value.to_pandas().drop(op.keys), store_data=True
)
else:
new_shape = input_chunk.shape
columns = input_chunk.columns_value
out_op = op.copy().reset_key()
out_chunk = out_op.new_chunk(
[index_chunk, input_chunk],
shape=new_shape,
dtypes=out_df.dtypes,
index=input_chunk.index,
index_value=parse_index(pd.Int64Index([])),
columns_value=columns,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns_value,
chunks=out_chunks,
nsplits=in_df.nsplits,
)
|
def tile(cls, op):
in_df = op.inputs[0]
out_df = op.outputs[0]
if not isinstance(op.keys, six.string_types):
raise NotImplementedError("DataFrame.set_index only support label")
if op.verify_integrity:
raise NotImplementedError(
"DataFrame.set_index not support verify_integrity yet"
)
out_chunks = []
try:
column_index = in_df.columns.to_pandas().get_loc(op.keys)
except KeyError:
raise NotImplementedError(
"The new index label must be a column of the original dataframe"
)
chunk_index = np.searchsorted(np.cumsum(in_df.nsplits[1]), column_index + 1)
for row_idx in range(in_df.chunk_shape[0]):
index_chunk = in_df.cix[row_idx, chunk_index]
for col_idx in range(in_df.chunk_shape[1]):
input_chunk = in_df.cix[row_idx, col_idx]
if op.drop and input_chunk.key == index_chunk.key:
new_shape = (input_chunk.shape[0], input_chunk.shape[1] - 1)
columns = parse_index(
input_chunk.columns.to_pandas().drop(op.keys), store_data=True
)
else:
new_shape = input_chunk.shape
columns = input_chunk.columns
out_op = op.copy().reset_key()
out_chunk = out_op.new_chunk(
[index_chunk, input_chunk],
shape=new_shape,
dtypes=out_df.dtypes,
index=input_chunk.index,
index_value=parse_index(pd.Int64Index([])),
columns_value=columns,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=in_df.nsplits,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def calc_columns_index(column_name, df):
"""
Calculate the chunk index on the axis 1 according to the selected column.
:param column_name: selected column name
:param df: input tiled DataFrame
:return: chunk index on the columns axis
"""
column_nsplits = df.nsplits[1]
column_loc = df.columns_value.to_pandas().get_loc(column_name)
return np.searchsorted(np.cumsum(column_nsplits), column_loc + 1)
|
def calc_columns_index(column_name, df):
"""
Calculate the chunk index on the axis 1 according to the selected column.
:param column_name: selected column name
:param df: input tiled DataFrame
:return: chunk index on the columns axis
"""
column_nsplits = df.nsplits[1]
column_loc = df.columns.to_pandas().get_loc(column_name)
return np.searchsorted(np.cumsum(column_nsplits), column_loc + 1)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def execute(cls, ctx, op):
def _base_concat(chunk, inputs):
# auto generated concat when executing a DataFrame, Series or Index
if chunk.op.object_type == ObjectType.dataframe:
return _auto_concat_dataframe_chunks(chunk, inputs)
elif chunk.op.object_type == ObjectType.series:
return _auto_concat_series_chunks(chunk, inputs)
else:
raise TypeError(
"Only DataFrameChunk, SeriesChunk and IndexChunk "
"can be automatically concatenated"
)
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns_value, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
def _auto_concat_series_chunks(chunk, inputs):
# auto generated concat when executing a Series
if all(np.isscalar(inp) for inp in inputs):
return pd.Series(inputs)
else:
xdf = pd if isinstance(inputs[0], pd.Series) else cudf
if chunk.op.axis is not None:
concat = xdf.concat(inputs, axis=chunk.op.axis)
else:
concat = xdf.concat(inputs)
if getattr(chunk.index_value, "should_be_monotonic", False):
concat.sort_index(inplace=True)
return concat
chunk = op.outputs[0]
inputs = [ctx[input.key] for input in op.inputs]
if isinstance(inputs[0], tuple):
ctx[chunk.key] = tuple(
_base_concat(chunk, [input[i] for input in inputs])
for i in range(len(inputs[0]))
)
else:
ctx[chunk.key] = _base_concat(chunk, inputs)
|
def execute(cls, ctx, op):
def _base_concat(chunk, inputs):
# auto generated concat when executing a DataFrame, Series or Index
if chunk.op.object_type == ObjectType.dataframe:
return _auto_concat_dataframe_chunks(chunk, inputs)
elif chunk.op.object_type == ObjectType.series:
return _auto_concat_series_chunks(chunk, inputs)
else:
raise TypeError(
"Only DataFrameChunk, SeriesChunk and IndexChunk "
"can be automatically concatenated"
)
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
def _auto_concat_series_chunks(chunk, inputs):
# auto generated concat when executing a Series
if all(np.isscalar(inp) for inp in inputs):
return pd.Series(inputs)
else:
xdf = pd if isinstance(inputs[0], pd.Series) else cudf
if chunk.op.axis is not None:
concat = xdf.concat(inputs, axis=chunk.op.axis)
else:
concat = xdf.concat(inputs)
if getattr(chunk.index_value, "should_be_monotonic", False):
concat.sort_index(inplace=True)
return concat
chunk = op.outputs[0]
inputs = [ctx[input.key] for input in op.inputs]
if isinstance(inputs[0], tuple):
ctx[chunk.key] = tuple(
_base_concat(chunk, [input[i] for input in inputs])
for i in range(len(inputs[0]))
)
else:
ctx[chunk.key] = _base_concat(chunk, inputs)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns_value, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
|
def _auto_concat_dataframe_chunks(chunk, inputs):
if chunk.op.axis is not None:
return pd.concat(inputs, axis=op.axis)
# auto generated concat when executing a DataFrame
n_rows = max(inp.index[0] for inp in chunk.inputs) + 1
n_cols = int(len(inputs) // n_rows)
assert n_rows * n_cols == len(inputs)
xdf = pd if isinstance(inputs[0], pd.DataFrame) else cudf
concats = []
for i in range(n_rows):
concat = xdf.concat([inputs[i * n_cols + j] for j in range(n_cols)], axis=1)
concats.append(concat)
if xdf is pd:
# The `sort=False` is to suppress a `FutureWarning` of pandas, when the index or column of chunks to
# concatenate is not aligned, which may happens for certain ops.
#
# See also Note [Columns of Left Join] in test_merge_execution.py.
ret = xdf.concat(concats, sort=False)
else:
ret = xdf.concat(concats)
if getattr(chunk.index_value, "should_be_monotonic", False):
ret.sort_index(inplace=True)
if getattr(chunk.columns, "should_be_monotonic", False):
ret.sort_index(axis=1, inplace=True)
return ret
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _gen_shuffle_chunks(cls, op, out_shape, shuffle_on, df):
# gen map chunks
map_chunks = []
for chunk in df.chunks:
map_op = DataFrameMergeAlignMap(
shuffle_on=shuffle_on,
sparse=chunk.issparse(),
index_shuffle_size=out_shape[0],
)
map_chunks.append(
map_op.new_chunk(
[chunk],
shape=(np.nan, np.nan),
dtypes=chunk.dtypes,
index=chunk.index,
index_value=chunk.index_value,
columns_value=chunk.columns_value,
)
)
proxy_chunk = DataFrameShuffleProxy(object_type=ObjectType.dataframe).new_chunk(
map_chunks,
shape=(),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
# gen reduce chunks
reduce_chunks = []
for out_idx in itertools.product(*(range(s) for s in out_shape)):
reduce_op = DataFrameMergeAlignReduce(
sparse=proxy_chunk.issparse(),
shuffle_key=",".join(str(idx) for idx in out_idx),
)
reduce_chunks.append(
reduce_op.new_chunk(
[proxy_chunk],
shape=(np.nan, np.nan),
dtypes=proxy_chunk.dtypes,
index=out_idx,
index_value=proxy_chunk.index_value,
columns_value=proxy_chunk.columns_value,
)
)
return reduce_chunks
|
def _gen_shuffle_chunks(cls, op, out_shape, shuffle_on, df):
# gen map chunks
map_chunks = []
for chunk in df.chunks:
map_op = DataFrameMergeAlignMap(
shuffle_on=shuffle_on,
sparse=chunk.issparse(),
index_shuffle_size=out_shape[0],
)
map_chunks.append(
map_op.new_chunk(
[chunk],
shape=(np.nan, np.nan),
dtypes=chunk.dtypes,
index=chunk.index,
index_value=chunk.index_value,
columns_value=chunk.columns,
)
)
proxy_chunk = DataFrameShuffleProxy(object_type=ObjectType.dataframe).new_chunk(
map_chunks,
shape=(),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
# gen reduce chunks
reduce_chunks = []
for out_idx in itertools.product(*(range(s) for s in out_shape)):
reduce_op = DataFrameMergeAlignReduce(
sparse=proxy_chunk.issparse(),
shuffle_key=",".join(str(idx) for idx in out_idx),
)
reduce_chunks.append(
reduce_op.new_chunk(
[proxy_chunk],
shape=(np.nan, np.nan),
dtypes=proxy_chunk.dtypes,
index=out_idx,
index_value=proxy_chunk.index_value,
columns_value=proxy_chunk.columns,
)
)
return reduce_chunks
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
df = op.outputs[0]
left = build_concated_rows_frame(op.inputs[0])
right = build_concated_rows_frame(op.inputs[1])
# left and right now are guaranteed only chunked along index axis, not column axis.
assert left.chunk_shape[1] == 1
assert right.chunk_shape[1] == 1
left_row_chunk_size = left.chunk_shape[0]
right_row_chunk_size = right.chunk_shape[0]
out_row_chunk_size = max(left_row_chunk_size, right_row_chunk_size)
out_chunk_shape = (out_row_chunk_size, 1)
nsplits = [[np.nan for _ in range(out_row_chunk_size)], [df.shape[1]]]
left_on = _prepare_shuffle_on(op.left_index, op.left_on, op.on)
right_on = _prepare_shuffle_on(op.right_index, op.right_on, op.on)
# do shuffle
left_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, left_on, left)
right_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, right_on, right)
out_chunks = []
for left_chunk, right_chunk in zip(left_chunks, right_chunks):
merge_op = op.copy().reset_key()
out_chunk = merge_op.new_chunk(
[left_chunk, right_chunk],
shape=(np.nan, df.shape[1]),
index=left_chunk.index,
index_value=infer_index_value(
left_chunk.index_value, right_chunk.index_value
),
columns_value=df.columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def tile(cls, op):
df = op.outputs[0]
left = build_concated_rows_frame(op.inputs[0])
right = build_concated_rows_frame(op.inputs[1])
# left and right now are guaranteed only chunked along index axis, not column axis.
assert left.chunk_shape[1] == 1
assert right.chunk_shape[1] == 1
left_row_chunk_size = left.chunk_shape[0]
right_row_chunk_size = right.chunk_shape[0]
out_row_chunk_size = max(left_row_chunk_size, right_row_chunk_size)
out_chunk_shape = (out_row_chunk_size, 1)
nsplits = [[np.nan for _ in range(out_row_chunk_size)], [df.shape[1]]]
left_on = _prepare_shuffle_on(op.left_index, op.left_on, op.on)
right_on = _prepare_shuffle_on(op.right_index, op.right_on, op.on)
# do shuffle
left_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, left_on, left)
right_chunks = cls._gen_shuffle_chunks(op, out_chunk_shape, right_on, right)
out_chunks = []
for left_chunk, right_chunk in zip(left_chunks, right_chunks):
merge_op = op.copy().reset_key()
out_chunk = merge_op.new_chunk(
[left_chunk, right_chunk],
shape=(np.nan, df.shape[1]),
index=left_chunk.index,
index_value=infer_index_value(
left_chunk.index_value, right_chunk.index_value
),
columns_value=df.columns,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def concat_tileable_chunks(cls, tileable):
from .merge.concat import DataFrameConcat, GroupByConcat
from .operands import ObjectType, DATAFRAME_TYPE, SERIES_TYPE, GROUPBY_TYPE
df = tileable
assert not df.is_coarse()
if isinstance(df, DATAFRAME_TYPE):
chunk = DataFrameConcat(object_type=ObjectType.dataframe).new_chunk(
df.chunks,
shape=df.shape,
index=(0, 0),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
return DataFrameConcat(object_type=ObjectType.dataframe).new_dataframe(
[df],
shape=df.shape,
chunks=[chunk],
nsplits=tuple((s,) for s in df.shape),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
elif isinstance(df, SERIES_TYPE):
chunk = DataFrameConcat(object_type=ObjectType.series).new_chunk(
df.chunks,
shape=df.shape,
index=(0,),
dtype=df.dtype,
index_value=df.index_value,
name=df.name,
)
return DataFrameConcat(object_type=ObjectType.series).new_series(
[df],
shape=df.shape,
chunks=[chunk],
nsplits=tuple((s,) for s in df.shape),
dtype=df.dtype,
index_value=df.index_value,
name=df.name,
)
elif isinstance(df, GROUPBY_TYPE):
chunk = GroupByConcat(by=df.op.by, object_type=ObjectType.dataframe).new_chunk(
df.chunks
)
return GroupByConcat(
by=df.op.by, object_type=ObjectType.dataframe
).new_dataframe([df], chunks=[chunk])
else:
raise NotImplementedError
|
def concat_tileable_chunks(cls, tileable):
from .merge.concat import DataFrameConcat, GroupByConcat
from .operands import ObjectType, DATAFRAME_TYPE, SERIES_TYPE, GROUPBY_TYPE
df = tileable
assert not df.is_coarse()
if isinstance(df, DATAFRAME_TYPE):
chunk = DataFrameConcat(object_type=ObjectType.dataframe).new_chunk(
df.chunks,
shape=df.shape,
index=(0, 0),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
return DataFrameConcat(object_type=ObjectType.dataframe).new_dataframe(
[df],
shape=df.shape,
chunks=[chunk],
nsplits=tuple((s,) for s in df.shape),
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
elif isinstance(df, SERIES_TYPE):
chunk = DataFrameConcat(object_type=ObjectType.series).new_chunk(
df.chunks,
shape=df.shape,
index=(0,),
dtype=df.dtype,
index_value=df.index_value,
name=df.name,
)
return DataFrameConcat(object_type=ObjectType.series).new_series(
[df],
shape=df.shape,
chunks=[chunk],
nsplits=tuple((s,) for s in df.shape),
dtype=df.dtype,
index_value=df.index_value,
name=df.name,
)
elif isinstance(df, GROUPBY_TYPE):
chunk = GroupByConcat(by=df.op.by, object_type=ObjectType.dataframe).new_chunk(
df.chunks
)
return GroupByConcat(
by=df.op.by, object_type=ObjectType.dataframe
).new_dataframe([df], chunks=[chunk])
else:
raise NotImplementedError
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def split_monotonic_index_min_max(
left_min_max, left_increase, right_min_max, right_increase
):
"""
Split the original two min_max into new min_max. Each min_max should be a list
in which each item should be a 4-tuple indicates that this chunk's min value,
whether the min value is close, the max value, and whether the max value is close.
The return value would be a nested list, each item is a list
indicates that how this chunk should be split into.
:param left_min_max: the left min_max
:param left_increase: if the original data of left is increased
:param right_min_max: the right min_max
:param right_increase: if the original data of right is increased
:return: nested list in which each item indicates how min_max is split
>>> left_min_max = [(0, True, 3, True), (4, True, 8, True), (12, True, 18, True),
>>> (20, True, 22, True)]
>>> right_min_max = [(2, True, 6, True), (7, True, 9, True), (10, True, 14, True),
>>> (18, True, 19, True)]
>>> l, r = split_monotonic_index_min_max(left_min_max, True, right_min_max, True)
>>> l
[[(0, True, 2, False), (2, True, 3, True)], [(3, False, 4, False), (4, True, 6, True), (6, False, 7, False),
(7, True, 8, True)], [(8, False, 9, True), (10, True, 12, False), (12, True, 14, True), (14, False, 18, False),
(18, True, 18, True)], [(18, False, 19, True), [20, True, 22, True]]]
>>> r
[[(0, True, 2, False), (2, True, 3, True), (3, False, 4, False), (4, True, 6, True)],
[(6, False, 7, False), (7, True, 8, True), (8, False, 9, True)], [(10, True, 12, False), (12, True, 14, True)],
[(14, False, 18, False), (18, True, 18, True), (18, False, 19, True), [20, True, 22, True]]]
"""
left_idx_to_min_max = [[] for _ in left_min_max]
right_idx_to_min_max = [[] for _ in right_min_max]
left_curr_min_max = list(left_min_max[0])
right_curr_min_max = list(right_min_max[0])
left_curr_idx = right_curr_idx = 0
left_terminate = right_terminate = False
while not left_terminate or not right_terminate:
if left_terminate:
left_idx_to_min_max[left_curr_idx].append(tuple(right_curr_min_max))
right_idx_to_min_max[right_curr_idx].append(tuple(right_curr_min_max))
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
elif right_terminate:
right_idx_to_min_max[right_curr_idx].append(tuple(left_curr_min_max))
left_idx_to_min_max[left_curr_idx].append(tuple(left_curr_min_max))
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
elif left_curr_min_max[0] < right_curr_min_max[0]:
# left min < right min
right_min = [right_curr_min_max[0], not right_curr_min_max[1]]
max_val = min(left_curr_min_max[2:], right_min)
assert len(max_val) == 2
min_max = (
left_curr_min_max[0],
left_curr_min_max[1],
max_val[0],
max_val[1],
)
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if left_curr_min_max[2:] == max_val:
# left max < right min
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
left_curr_min_max[:2] = right_curr_min_max[:2]
elif left_curr_min_max[0] > right_curr_min_max[0]:
# left min > right min
left_min = [left_curr_min_max[0], not left_curr_min_max[1]]
max_val = min(right_curr_min_max[2:], left_min)
min_max = (
right_curr_min_max[0],
right_curr_min_max[1],
max_val[0],
max_val[1],
)
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if right_curr_min_max[2:] == max_val:
# right max < left min
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
right_curr_min_max[:2] = left_curr_min_max[:2]
else:
# left min == right min
max_val = min(left_curr_min_max[2:], right_curr_min_max[2:])
assert len(max_val) == 2
min_max = (
left_curr_min_max[0],
left_curr_min_max[1],
max_val[0],
max_val[1],
)
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if max_val == left_curr_min_max[2:]:
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
left_curr_min_max[:2] = max_val[0], not max_val[1]
if max_val == right_curr_min_max[2:]:
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
right_curr_min_max[:2] = max_val[0], not max_val[1]
if left_increase is False:
left_idx_to_min_max = list(reversed(left_idx_to_min_max))
if right_increase is False:
right_idx_to_min_max = list(reversed(right_idx_to_min_max))
return left_idx_to_min_max, right_idx_to_min_max
|
def split_monotonic_index_min_max(
left_min_max, left_increase, right_min_max, right_increase
):
"""
Split the original two min_max into new min_max. Each min_max should be a list
in which each item should be a 4-tuple indicates that this chunk's min value,
whether the min value is close, the max value, and whether the max value is close.
The return value would be a nested list, each item is a list
indicates that how this chunk should be split into.
:param left_min_max: the left min_max
:param left_increase: if the original data of left is increased
:param right_min_max: the right min_max
:param right_increase: if the original data of right is increased
:return: nested list in which each item indicates how min_max is split
>>> left_min_max = [(0, True, 3, True), (4, True, 8, True), (12, True, 18, True),
>>> (20, True, 22, True)]
>>> right_min_max = [(2, True, 6, True), (7, True, 9, True), (10, True, 14, True),
>>> (18, True, 19, True)]
>>> l, r = split_monotonic_index_min_max(left_min_max, True, right_min_max, True)
>>> l
[[(0, True, 2, False), (2, True, 3, True)], [(3, False, 4, False), (4, True, 6, True), (6, False, 7, False),
(7, True, 8, True)], [(8, False, 9, True), (10, True, 12, False), (12, True, 14, True), (14, False, 18, False),
(18, True, 18, True)], [(18, False, 19, True), [20, True, 22, True]]]
>>> r
[[(0, True, 2, False), (2, True, 3, True), (3, False, 4, False), (4, True, 6, True)],
[(6, False, 7, False), (7, True, 8, True), (8, False, 9, True)], [(10, True, 12, False), (12, True, 14, True)],
[(14, False, 18, False), (18, True, 18, True), (18, False, 19, True), [20, True, 22, True]]]
"""
left_idx_to_min_max = [[] for _ in left_min_max]
right_idx_to_min_max = [[] for _ in right_min_max]
left_curr_min_max = list(left_min_max[0])
right_curr_min_max = list(right_min_max[0])
left_curr_idx = right_curr_idx = 0
left_terminate = right_terminate = False
while not left_terminate or not right_terminate:
if left_terminate:
left_idx_to_min_max[left_curr_idx].append(tuple(right_curr_min_max))
right_idx_to_min_max[right_curr_idx].append(tuple(right_curr_min_max))
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
elif right_terminate:
right_idx_to_min_max[right_curr_idx].append(tuple(left_curr_min_max))
left_idx_to_min_max[left_curr_idx].append(tuple(left_curr_min_max))
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
elif left_curr_min_max[0] < right_curr_min_max[0]:
# left min < right min
right_min = [right_curr_min_max[0], not right_curr_min_max[1]]
max_val = min(left_curr_min_max[2:], right_min)
assert len(max_val) == 2
min_max = (
left_curr_min_max[0],
left_curr_min_max[1],
max_val[0],
max_val[1],
)
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if left_curr_min_max[2:] == max_val:
# left max < right min
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
left_curr_min_max[:2] = right_curr_min_max[:2]
elif left_curr_min_max[0] > right_curr_min_max[0]:
# left min > right min
left_min = [left_curr_min_max[0], not left_curr_min_max[1]]
max_val = min(right_curr_min_max[2:], left_min)
min_max = (
right_curr_min_max[0],
right_curr_min_max[1],
max_val[0],
max_val[1],
)
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if right_curr_min_max[2:] == max_val:
# right max < left min
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
right_curr_min_max[:2] = left_curr_min_max[:2]
else:
# left min == right min
max_val = min(left_curr_min_max[2:], right_curr_min_max[2:])
assert len(max_val) == 2
min_max = (
left_curr_min_max[0],
left_curr_min_max[1],
max_val[0],
max_val[1],
)
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if max_val == left_curr_min_max[2:]:
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
left_curr_min_max[:2] = max_val[0], not max_val[1]
if max_val == right_curr_min_max[2:]:
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
right_curr_min_max[:2] = max_val[0], not max_val[1]
if not left_increase:
left_idx_to_min_max = list(reversed(left_idx_to_min_max))
if not right_increase:
right_idx_to_min_max = list(reversed(right_idx_to_min_max))
return left_idx_to_min_max, right_idx_to_min_max
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def build_split_idx_to_origin_idx(splits, increase=True):
# splits' len is equal to the original chunk size on a specified axis,
# splits is sth like [[(0, True, 2, True), (2, False, 3, True)]]
# which means there is one input chunk, and will be split into 2 out chunks
# in this function, we want to build a new dict from the out chunk index to
# the original chunk index and the inner position, like {0: (0, 0), 1: (0, 1)}
if increase is False:
splits = list(reversed(splits))
out_idx = itertools.count(0)
res = dict()
for origin_idx, _ in enumerate(splits):
for pos in range(len(splits[origin_idx])):
if increase is False:
o_idx = len(splits) - origin_idx - 1
else:
o_idx = origin_idx
res[next(out_idx)] = o_idx, pos
return res
|
def build_split_idx_to_origin_idx(splits, increase=True):
# splits' len is equal to the original chunk size on a specified axis,
# splits is sth like [[(0, True, 2, True), (2, False, 3, True)]]
# which means there is one input chunk, and will be split into 2 out chunks
# in this function, we want to build a new dict from the out chunk index to
# the original chunk index and the inner position, like {0: (0, 0), 1: (0, 1)}
if not increase:
splits = list(reversed(splits))
out_idx = itertools.count(0)
res = dict()
for origin_idx, _ in enumerate(splits):
for pos in range(len(splits[origin_idx])):
if increase:
o_idx = origin_idx
else:
o_idx = len(splits) - origin_idx - 1
res[next(out_idx)] = o_idx, pos
return res
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def build_concated_rows_frame(df):
from .operands import ObjectType
from .merge.concat import DataFrameConcat
# When the df isn't splitted along the column axis, return the df directly.
if df.chunk_shape[1] == 1:
return df
columns = concat_index_value(
[df.cix[0, idx].columns_value for idx in range(df.chunk_shape[1])],
store_data=True,
)
columns_size = columns.to_pandas().size
out_chunks = []
for idx in range(df.chunk_shape[0]):
out_chunk = DataFrameConcat(axis=1, object_type=ObjectType.dataframe).new_chunk(
[df.cix[idx, k] for k in range(df.chunk_shape[1])],
index=(idx, 0),
shape=(df.cix[idx, 0].shape[0], columns_size),
dtypes=df.dtypes,
index_value=df.cix[idx, 0].index_value,
columns_value=columns,
)
out_chunks.append(out_chunk)
return DataFrameConcat(axis=1, object_type=ObjectType.dataframe).new_dataframe(
[df],
chunks=out_chunks,
nsplits=((chunk.shape[0] for chunk in out_chunks), (df.shape[1],)),
shape=df.shape,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns_value,
)
|
def build_concated_rows_frame(df):
from .operands import ObjectType
from .merge.concat import DataFrameConcat
# When the df isn't splitted along the column axis, return the df directly.
if df.chunk_shape[1] == 1:
return df
columns = concat_index_value(
[df.cix[0, idx].columns for idx in range(df.chunk_shape[1])], store_data=True
)
columns_size = columns.to_pandas().size
out_chunks = []
for idx in range(df.chunk_shape[0]):
out_chunk = DataFrameConcat(axis=1, object_type=ObjectType.dataframe).new_chunk(
[df.cix[idx, k] for k in range(df.chunk_shape[1])],
index=(idx, 0),
shape=(df.cix[idx, 0].shape[0], columns_size),
dtypes=df.dtypes,
index_value=df.cix[idx, 0].index_value,
columns_value=columns,
)
out_chunks.append(out_chunk)
return DataFrameConcat(axis=1, object_type=ObjectType.dataframe).new_dataframe(
[df],
chunks=out_chunks,
nsplits=((chunk.shape[0] for chunk in out_chunks), (df.shape[1],)),
shape=df.shape,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def execute_graph(
self,
graph,
keys,
n_parallel=None,
print_progress=False,
mock=False,
no_intermediate=False,
compose=True,
retval=True,
chunk_result=None,
):
"""
:param graph: graph to execute
:param keys: result keys
:param n_parallel: num of max parallelism
:param print_progress:
:param compose: if True. fuse nodes when possible
:param mock: if True, only estimate data sizes without execution
:param no_intermediate: exclude intermediate data sizes when estimating memory size
:param retval: if True, keys specified in argument keys is returned
:param chunk_result: dict to put chunk key to chunk data, if None, use self.chunk_result
:return: execution result
"""
optimized_graph = self._preprocess(graph, keys) if compose else graph
if not mock:
# fetch_keys only useful when calculating sizes
fetch_keys = set()
else:
fetch_keys = set(v.key for v in graph if isinstance(v.op, Fetch))
for c in graph:
if graph.count_predecessors(c) != 0:
continue
fetch_keys.update(inp.key for inp in c.inputs or ())
executed_keys = list(
itertools.chain(*[v[1] for v in self.stored_tileables.values()])
)
chunk_result = self._chunk_result if chunk_result is None else chunk_result
graph_execution = GraphExecution(
chunk_result,
optimized_graph,
keys,
executed_keys,
self._sync_provider,
n_parallel=n_parallel,
engine=self._engine,
prefetch=self._prefetch,
print_progress=print_progress,
mock=mock,
mock_max_memory=self._mock_max_memory,
fetch_keys=fetch_keys,
no_intermediate=no_intermediate,
)
res = graph_execution.execute(retval)
self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
if mock:
chunk_result.clear()
return res
|
def execute_graph(
self,
graph,
keys,
n_parallel=None,
print_progress=False,
mock=False,
no_intermediate=False,
compose=True,
retval=True,
chunk_result=None,
):
"""
:param graph: graph to execute
:param keys: result keys
:param n_parallel: num of max parallelism
:param print_progress:
:param compose: if True. fuse nodes when possible
:param mock: if True, only estimate data sizes without execution
:param no_intermediate: exclude intermediate data sizes when estimating memory size
:param retval: if True, keys specified in argument keys is returned
:param chunk_result: dict to put chunk key to chunk data, if None, use self.chunk_result
:return: execution result
"""
optimized_graph = self._preprocess(graph, keys) if compose else graph
if not mock:
# fetch_keys only useful when calculating sizes
fetch_keys = set()
else:
fetch_keys = set(v.key for v in graph if isinstance(v.op, Fetch))
for c in graph:
if graph.count_predecessors(c) != 0:
continue
fetch_keys.update(inp.key for inp in c.inputs or ())
executed_keys = list(
itertools.chain(*[v[1] for v in self.stored_tileables.values()])
)
chunk_result = self._chunk_result if chunk_result is None else chunk_result
graph_execution = GraphExecution(
chunk_result,
optimized_graph,
keys,
executed_keys,
self._sync_provider,
n_parallel=n_parallel,
engine=self._engine,
prefetch=self._prefetch,
print_progress=print_progress,
mock=mock,
mock_max_memory=self._mock_max_memory,
fetch_keys=fetch_keys,
no_intermediate=no_intermediate,
)
res = graph_execution.execute(retval)
self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
if mock:
self._chunk_result.clear()
return res
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def execute_tileable(
self,
tileable,
n_parallel=None,
n_thread=None,
concat=False,
print_progress=False,
mock=False,
compose=True,
):
if concat:
# only for tests
tileable.tiles()
if len(tileable.chunks) > 1:
tileable = tileable.op.concat_tileable_chunks(tileable)
# shallow copy
chunk_result = self._chunk_result.copy()
graph = tileable.build_graph(cls=DirectedGraph, tiled=True, compose=compose)
ret = self.execute_graph(
graph,
[c.key for c in tileable.chunks],
n_parallel=n_parallel or n_thread,
print_progress=print_progress,
mock=mock,
chunk_result=chunk_result,
)
self._chunk_result.update(chunk_result)
return ret
|
def execute_tileable(
self,
tileable,
n_parallel=None,
n_thread=None,
concat=False,
print_progress=False,
mock=False,
compose=True,
):
if concat:
# only for tests
tileable.tiles()
if len(tileable.chunks) > 1:
tileable = tileable.op.concat_tileable_chunks(tileable)
graph = tileable.build_graph(cls=DirectedGraph, tiled=True, compose=compose)
return self.execute_graph(
graph,
[c.key for c in tileable.chunks],
n_parallel=n_parallel or n_thread,
print_progress=print_progress,
mock=mock,
)
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def _get_kw(obj):
if isinstance(obj, TENSOR_TYPE + TENSOR_CHUNK_TYPE):
return {"shape": obj.shape, "dtype": obj.dtype, "order": obj.order}
else:
return {
"shape": obj.shape,
"dtypes": obj.dtypes,
"index_value": obj.index_value,
"columns_value": obj.columns_value,
}
|
def _get_kw(obj):
if isinstance(obj, TENSOR_TYPE + TENSOR_CHUNK_TYPE):
return {"shape": obj.shape, "dtype": obj.dtype, "order": obj.order}
else:
return {
"shape": obj.shape,
"dtypes": obj.dtypes,
"index_value": obj.index_value,
"columns_value": obj.columns,
}
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def tile(cls, op):
out = op.outputs[0]
out_chunks = []
data = op.data
if data.chunk_shape[1] > 1:
data = data.rechunk({1: op.data.shape[1]}).single_tiles()
for in_chunk in data.chunks:
chunk_op = op.copy().reset_key()
chunk_index = (in_chunk.index[0],)
if op.model.attr("num_class"):
chunk_shape = (len(in_chunk), 2)
chunk_index += (0,)
else:
chunk_shape = (len(in_chunk),)
if op.output_types[0] == OutputType.tensor:
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=chunk_shape,
dtype=out.dtype,
order=out.order,
index=chunk_index,
)
elif op.output_types[0] == OutputType.dataframe:
# dataframe chunk
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=chunk_shape,
dtypes=data.dtypes,
columns_value=data.columns_value,
index_value=in_chunk.index_value,
index=chunk_index,
)
else:
# series chunk
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=chunk_shape,
dtype=out.dtype,
index_value=in_chunk.index_value,
name=out.name,
index=chunk_index,
)
out_chunks.append(out_chunk)
new_op = op.copy()
params = out.params
params["chunks"] = out_chunks
nsplits = (data.nsplits[0],)
if out.ndim > 1:
nsplits += ((out.shape[1],),)
params["nsplits"] = nsplits
return new_op.new_tileables(op.inputs, kws=[params])
|
def tile(cls, op):
out = op.outputs[0]
out_chunks = []
data = op.data
if data.chunk_shape[1] > 1:
data = data.rechunk({1: op.data.shape[1]}).single_tiles()
for in_chunk in data.chunks:
chunk_op = op.copy().reset_key()
chunk_index = (in_chunk.index[0],)
if op.model.attr("num_class"):
chunk_shape = (len(in_chunk), 2)
chunk_index += (0,)
else:
chunk_shape = (len(in_chunk),)
if op.output_types[0] == OutputType.tensor:
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=chunk_shape,
dtype=out.dtype,
order=out.order,
index=chunk_index,
)
elif op.output_types[0] == OutputType.dataframe:
# dataframe chunk
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=chunk_shape,
dtypes=data.dtypes,
columns_value=data.columns,
index_value=in_chunk.index_value,
index=chunk_index,
)
else:
# series chunk
out_chunk = chunk_op.new_chunk(
[in_chunk],
shape=chunk_shape,
dtype=out.dtype,
index_value=in_chunk.index_value,
name=out.name,
index=chunk_index,
)
out_chunks.append(out_chunk)
new_op = op.copy()
params = out.params
params["chunks"] = out_chunks
nsplits = (data.nsplits[0],)
if out.ndim > 1:
nsplits += ((out.shape[1],),)
params["nsplits"] = nsplits
return new_op.new_tileables(op.inputs, kws=[params])
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def sort_dataframe_result(df, result):
"""sort DataFrame on client according to `should_be_monotonic` attribute"""
if hasattr(df, "index_value"):
if getattr(df.index_value, "should_be_monotonic", False):
result.sort_index(inplace=True)
if hasattr(df, "columns_value"):
if getattr(df.columns_value, "should_be_monotonic", False):
result.sort_index(axis=1, inplace=True)
return result
|
def sort_dataframe_result(df, result):
"""sort DataFrame on client according to `should_be_monotonic` attribute"""
if hasattr(df, "index_value"):
if getattr(df.index_value, "should_be_monotonic", False):
result.sort_index(inplace=True)
if hasattr(df, "columns"):
if getattr(df.columns, "should_be_monotonic", False):
result.sort_index(axis=1, inplace=True)
return result
|
https://github.com/mars-project/mars/issues/814
|
In [28]: df = md.DataFrame(np.random.rand(10, 2))
In [29]: df + np.random.rand(10, 2)
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-29-4a212df011a8> in <module>
----> 1 df + np.random.rand(10, 2)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrapper(*args, **kwargs)
613 def wrapper(*args, **kwargs):
614 try:
--> 615 return func(*args, **kwargs)
616 except NotImplementedError:
617 return NotImplemented
~/Documents/mars_dev/mars/mars/dataframe/arithmetic/add.py in add(df, other, axis, level, fill_value)
33
34 def add(df, other, axis='columns', level=None, fill_value=None):
---> 35 other = wrap_sequence(other)
36 op = DataFrameAdd(axis=axis, level=level, fill_value=fill_value, lhs=df, rhs=other)
37 return op(df, other)
~/Documents/mars_dev/mars/mars/dataframe/utils.py in wrap_sequence(seq)
606
607 if isinstance(seq, (list, tuple, np.ndarray, TENSOR_TYPE)):
--> 608 seq = Series(seq)
609 return seq
610
~/Documents/mars_dev/mars/mars/dataframe/initializer.py in __init__(self, data, index, dtype, name, copy, chunk_size, gpu, sparse)
46 series = data
47 else:
---> 48 pd_series = pd.Series(data, index=index, dtype=dtype, name=name, copy=copy)
49 series = from_pandas_series(pd_series, chunk_size=chunk_size, gpu=gpu, sparse=sparse)
50 super(Series, self).__init__(series.data)
~/miniconda3/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
312 data = data.copy()
313 else:
--> 314 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
315
316 data = SingleBlockManager(data, index, fastpath=True)
~/miniconda3/lib/python3.7/site-packages/pandas/core/internals/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
727 elif subarr.ndim > 1:
728 if isinstance(data, np.ndarray):
--> 729 raise Exception("Data must be 1-dimensional")
730 else:
731 subarr = com.asarray_tuplesafe(data, dtype=dtype)
Exception: Data must be 1-dimensional
|
Exception
|
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
kw = {"casting": op.casting} if op.out else {}
inputs_iter = iter(inputs)
lhs = op.lhs if np.isscalar(op.lhs) else next(inputs_iter)
rhs = op.rhs if np.isscalar(op.rhs) else next(inputs_iter)
if op.out:
kw["out"] = next(inputs_iter).copy()
if op.where:
kw["where"] = next(inputs_iter)
with np.errstate(**op.err):
if op.is_gpu():
ret = cls._execute_gpu(op, xp, lhs, rhs, **kw)
else:
ret = cls._execute_cpu(op, xp, lhs, rhs, **kw)
ctx[op.outputs[0].key] = _handle_out_dtype(ret, op.dtype)
|
def execute(cls, ctx, op):
func_name = getattr(cls, "_func_name")
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
func = getattr(xp, func_name)
with device(device_id):
kw = {"casting": op.casting} if op.out else {}
inputs_iter = iter(inputs)
lhs = op.lhs if np.isscalar(op.lhs) else next(inputs_iter)
rhs = op.rhs if np.isscalar(op.rhs) else next(inputs_iter)
if op.out:
kw["out"] = next(inputs_iter).copy()
if op.where:
kw["where"] = next(inputs_iter)
kw["order"] = op.order
with np.errstate(**op.err):
ctx[op.outputs[0].key] = _handle_out_dtype(func(lhs, rhs, **kw), op.dtype)
|
https://github.com/mars-project/mars/issues/728
|
In [13]: x = mt.random.rand(10, 10, gpu=True)
In [14]: (x + 1).execute()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-14-60cf72259190> in <module>
----> 1 (x + 1).execute()
~/mars/mars/tensor/core.py in execute(self, session, **kw)
450
451 def execute(self, session=None, **kw):
--> 452 return self._data.execute(session, **kw)
453
454
~/mars/mars/core.py in execute(self, session, **kw)
582 if session is None:
583 session = Session.default_or_local()
--> 584 return session.run(self, **kw)
585
586 def fetch(self, session=None, **kw):
~/mars/mars/session.py in run(self, *tileables, **kw)
150 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
151 for t in tileables)
--> 152 result = self._sess.run(*tileables, **kw)
153
154 for t in tileables:
~/mars/mars/session.py in run(self, *tileables, **kw)
61 if 'n_parallel' not in kw:
62 kw['n_parallel'] = cpu_count()
---> 63 res = self._executor.execute_tileables(tileables, **kw)
64 return res
65
~/mars/mars/utils.py in _wrapped(*args, **kwargs)
393 _kernel_mode.eager = False
394 _kernel_mode.eager_count = enter_eager_count + 1
--> 395 return func(*args, **kwargs)
396 finally:
397 _kernel_mode.eager_count -= 1
~/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
562 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
563 print_progress=print_progress, mock=mock,
--> 564 chunk_result=chunk_result)
565
566 self._chunk_result.update(chunk_result)
~/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
494 mock_max_memory=self._mock_max_memory,
495 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 496 res = graph_execution.execute(retval)
497 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
498 if mock:
~/mars/mars/executor.py in execute(self, retval)
387 # wait until all the futures completed
388 for future in executed_futures:
--> 389 future.result()
390
391 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/mars/mars/executor.py in _execute_operand(self, op)
265 # so we pass the first operand's first output to Executor.handle
266 first_op = ops[0]
--> 267 Executor.handle(first_op, results, self._mock)
268
269 # update maximal memory usage during execution
~/mars/mars/executor.py in handle(cls, op, results, mock)
450 runner = getattr(op, method_name)
451 try:
--> 452 return runner(results, op)
453 except NotImplementedError:
454 for op_cls in mapper.keys():
~/mars/mars/tensor/arithmetic/core.py in execute(cls, ctx, op)
112
113 with np.errstate(**op.err):
--> 114 ctx[op.outputs[0].key] = _handle_out_dtype(func(lhs, rhs, **kw), op.dtype)
115
116
cupy/core/_kernel.pyx in cupy.core._kernel.ufunc.__call__()
TypeError: Wrong arguments {'order': 'K'}
|
TypeError
|
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
kw = {"casting": op.casting} if op.out else {}
if op.out and op.where:
inputs, kw["out"], kw["where"] = inputs[:-2], inputs[-2].copy(), inputs[-1]
elif op.out:
inputs, kw["out"] = inputs[:-1], inputs[-1].copy()
elif op.where:
inputs, kw["where"] = inputs[:-1], inputs[-1]
with np.errstate(**op.err):
if op.is_gpu():
ret = cls._execute_gpu(op, xp, inputs[0], **kw)
else:
ret = cls._execute_cpu(op, xp, inputs[0], **kw)
ctx[op.outputs[0].key] = _handle_out_dtype(ret, op.dtype)
|
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
func = cls._get_func(xp)
with device(device_id):
kw = {"casting": op.casting} if op.out else {}
if op.out and op.where:
inputs, kw["out"], kw["where"] = inputs[:-2], inputs[-2].copy(), inputs[-1]
elif op.out:
inputs, kw["out"] = inputs[:-1], inputs[-1].copy()
elif op.where:
inputs, kw["where"] = inputs[:-1], inputs[-1]
if op.order != "K":
kw["order"] = op.order
with np.errstate(**op.err):
ctx[op.outputs[0].key] = _handle_out_dtype(func(inputs[0], **kw), op.dtype)
|
https://github.com/mars-project/mars/issues/728
|
In [13]: x = mt.random.rand(10, 10, gpu=True)
In [14]: (x + 1).execute()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-14-60cf72259190> in <module>
----> 1 (x + 1).execute()
~/mars/mars/tensor/core.py in execute(self, session, **kw)
450
451 def execute(self, session=None, **kw):
--> 452 return self._data.execute(session, **kw)
453
454
~/mars/mars/core.py in execute(self, session, **kw)
582 if session is None:
583 session = Session.default_or_local()
--> 584 return session.run(self, **kw)
585
586 def fetch(self, session=None, **kw):
~/mars/mars/session.py in run(self, *tileables, **kw)
150 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
151 for t in tileables)
--> 152 result = self._sess.run(*tileables, **kw)
153
154 for t in tileables:
~/mars/mars/session.py in run(self, *tileables, **kw)
61 if 'n_parallel' not in kw:
62 kw['n_parallel'] = cpu_count()
---> 63 res = self._executor.execute_tileables(tileables, **kw)
64 return res
65
~/mars/mars/utils.py in _wrapped(*args, **kwargs)
393 _kernel_mode.eager = False
394 _kernel_mode.eager_count = enter_eager_count + 1
--> 395 return func(*args, **kwargs)
396 finally:
397 _kernel_mode.eager_count -= 1
~/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
562 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
563 print_progress=print_progress, mock=mock,
--> 564 chunk_result=chunk_result)
565
566 self._chunk_result.update(chunk_result)
~/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result)
494 mock_max_memory=self._mock_max_memory,
495 fetch_keys=fetch_keys, no_intermediate=no_intermediate)
--> 496 res = graph_execution.execute(retval)
497 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
498 if mock:
~/mars/mars/executor.py in execute(self, retval)
387 # wait until all the futures completed
388 for future in executed_futures:
--> 389 future.result()
390
391 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/mars/mars/executor.py in _execute_operand(self, op)
265 # so we pass the first operand's first output to Executor.handle
266 first_op = ops[0]
--> 267 Executor.handle(first_op, results, self._mock)
268
269 # update maximal memory usage during execution
~/mars/mars/executor.py in handle(cls, op, results, mock)
450 runner = getattr(op, method_name)
451 try:
--> 452 return runner(results, op)
453 except NotImplementedError:
454 for op_cls in mapper.keys():
~/mars/mars/tensor/arithmetic/core.py in execute(cls, ctx, op)
112
113 with np.errstate(**op.err):
--> 114 ctx[op.outputs[0].key] = _handle_out_dtype(func(lhs, rhs, **kw), op.dtype)
115
116
cupy/core/_kernel.pyx in cupy.core._kernel.ufunc.__call__()
TypeError: Wrong arguments {'order': 'K'}
|
TypeError
|
def has_value(self):
if isinstance(self._index_value, self.RangeIndex):
return True
elif getattr(self._index_value, "_data", None) is not None:
return True
return False
|
def has_value(self):
if isinstance(self._index_value, self.RangeIndex):
return True
elif getattr(self, "_data", None) is not None:
return True
return False
|
https://github.com/mars-project/mars/issues/718
|
In [1]: import mars.dataframe as md
In [2]: import mars.tensor as mt
In [3]: df = md.DataFrame(mt.random.rand(10, 3), columns=list('abc'))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-8015891094de> in <module>
----> 1 df = md.DataFrame(mt.random.rand(10, 3), columns=list('abc'))
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
29 chunk_size=None, gpu=None, sparse=None):
30 if isinstance(data, TENSOR_TYPE):
---> 31 df = from_tensor(data, index=index, columns=columns, gpu=gpu, sparse=sparse)
32 elif isinstance(data, DATAFRAME_TYPE):
33 df = data
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in from_tensor(tensor, index, columns, gpu, sparse)
135 gpu = tensor.op.gpu if gpu is None else gpu
136 op = DataFrameFromTensor(dtypes=pd.Series([tensor.dtype] * col_num), gpu=gpu, sparse=sparse)
--> 137 return op(tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
51 def __call__(self, input_tensor, index, columns):
52 if index is not None or columns is not None:
---> 53 if input_tensor.shape != (len(index), len(columns)):
54 raise ValueError(
55 '({0},{1}) should have the same shape with tensor: {2}'.format(index, columns, input_tensor.shape))
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def __call__(self, input_tensor, index, columns):
if input_tensor.ndim != 1 and input_tensor.ndim != 2:
raise ValueError("Must pass 1-d or 2-d input")
if index is not None:
if input_tensor.shape[0] != len(index):
raise ValueError(
"index {0} should have the same shape with tensor: {1}".format(
index, input_tensor.shape[0]
)
)
if not isinstance(index, pd.Index):
if isinstance(index, Base):
raise NotImplementedError("The index value cannot be a tileable")
index = pd.Index(index)
index_value = parse_index(index, store_data=True)
else:
index_value = parse_index(pd.RangeIndex(start=0, stop=input_tensor.shape[0]))
if columns is not None:
if input_tensor.shape[1] != len(columns):
raise ValueError(
"columns {0} should have the same shape with tensor: {1}".format(
columns, input_tensor.shape[1]
)
)
if not isinstance(columns, pd.Index):
if isinstance(index, Base):
raise NotImplementedError("The index value cannot be a tileable")
columns = pd.Index(columns)
columns_value = parse_index(columns, store_data=True)
else:
if input_tensor.ndim == 1:
# convert to 1-d DataFrame
columns_value = parse_index(pd.RangeIndex(start=0, stop=1), store_data=True)
else:
columns_value = parse_index(
pd.RangeIndex(start=0, stop=input_tensor.shape[1]), store_data=True
)
return self.new_dataframe(
[input_tensor],
input_tensor.shape,
dtypes=self.dtypes,
index_value=index_value,
columns_value=columns_value,
)
|
def __call__(self, input_tensor, index, columns):
if index is not None or columns is not None:
if input_tensor.shape != (len(index), len(columns)):
raise ValueError(
"({0},{1}) should have the same shape with tensor: {2}".format(
index, columns, input_tensor.shape
)
)
if input_tensor.ndim == 1:
# convert to 1-d DataFrame
index_value = parse_index(pd.RangeIndex(start=0, stop=input_tensor.shape[0]))
columns_value = parse_index(pd.RangeIndex(start=0, stop=1))
elif input_tensor.ndim != 2:
raise ValueError("Must pass 1-d or 2-d input")
else:
# convert to DataFrame
index_value = parse_index(pd.RangeIndex(start=0, stop=input_tensor.shape[0]))
columns_value = parse_index(pd.RangeIndex(start=0, stop=input_tensor.shape[1]))
# overwrite index_value and columns_value if user has set them
if index is not None:
index_value = parse_index(index, store_data=True)
if columns is not None:
columns_value = parse_index(columns, store_data=True)
return self.new_dataframe(
[input_tensor],
input_tensor.shape,
dtypes=self.dtypes,
index_value=index_value,
columns_value=columns_value,
)
|
https://github.com/mars-project/mars/issues/718
|
In [1]: import mars.dataframe as md
In [2]: import mars.tensor as mt
In [3]: df = md.DataFrame(mt.random.rand(10, 3), columns=list('abc'))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-8015891094de> in <module>
----> 1 df = md.DataFrame(mt.random.rand(10, 3), columns=list('abc'))
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
29 chunk_size=None, gpu=None, sparse=None):
30 if isinstance(data, TENSOR_TYPE):
---> 31 df = from_tensor(data, index=index, columns=columns, gpu=gpu, sparse=sparse)
32 elif isinstance(data, DATAFRAME_TYPE):
33 df = data
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in from_tensor(tensor, index, columns, gpu, sparse)
135 gpu = tensor.op.gpu if gpu is None else gpu
136 op = DataFrameFromTensor(dtypes=pd.Series([tensor.dtype] * col_num), gpu=gpu, sparse=sparse)
--> 137 return op(tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
51 def __call__(self, input_tensor, index, columns):
52 if index is not None or columns is not None:
---> 53 if input_tensor.shape != (len(index), len(columns)):
54 raise ValueError(
55 '({0},{1}) should have the same shape with tensor: {2}'.format(index, columns, input_tensor.shape))
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def tile(cls, op):
out_df = op.outputs[0]
in_tensor = op.input
out_chunks = []
nsplits = in_tensor.nsplits
if any(any(np.isnan(ns)) for ns in nsplits):
raise NotImplementedError("NAN shape is not supported in DataFrame")
cum_size = [np.cumsum(s) for s in nsplits]
for in_chunk in in_tensor.chunks:
out_op = op.copy().reset_key()
if in_chunk.ndim == 1:
(i,) = in_chunk.index
column_stop = 1
index = (in_chunk.index[0], 0)
columns_value = parse_index(
out_df.columns.to_pandas()[0:1], store_data=True
)
else:
i, j = in_chunk.index
column_stop = cum_size[1][j]
index = in_chunk.index
columns_value = parse_index(
out_df.columns.to_pandas()[
column_stop - in_chunk.shape[1] : column_stop
],
store_data=True,
)
index_stop = cum_size[0][i]
if out_df.index_value.has_value():
index_value = parse_index(
out_df.index_value.to_pandas()[
index_stop - in_chunk.shape[0] : index_stop
],
store_data=True,
)
else:
index_value = parse_index(
pd.RangeIndex(start=index_stop - in_chunk.shape[0], stop=index_stop)
)
out_op.extra_params["index_stop"] = index_stop
out_op.extra_params["column_stop"] = column_stop
out_chunk = out_op.new_chunk(
[in_chunk],
shape=in_chunk.shape,
index=index,
index_value=index_value,
columns_value=columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
out_df.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=in_tensor.nsplits,
)
|
def tile(cls, op):
out_df = op.outputs[0]
in_tensor = op.input
out_chunks = []
nsplits = in_tensor.nsplits
if any(any(np.isnan(ns)) for ns in nsplits):
raise NotImplementedError("NAN shape is not supported in DataFrame")
cum_size = [np.cumsum(s) for s in nsplits]
for in_chunk in in_tensor.chunks:
out_op = op.copy().reset_key()
if in_chunk.ndim == 1:
(i,) = in_chunk.index
column_stop = 1
index = (in_chunk.index[0], 0)
columns_value = parse_index(
out_df.columns.to_pandas()[0:1], store_data=True
)
else:
i, j = in_chunk.index
column_stop = cum_size[1][j]
index = in_chunk.index
columns_value = parse_index(
out_df.columns.to_pandas()[
column_stop - in_chunk.shape[1] : column_stop
],
store_data=True,
)
index_stop = cum_size[0][i]
if out_df.index_value is not None:
index_value = parse_index(
out_df.index_value.to_pandas()[
index_stop - in_chunk.shape[0] : index_stop
],
store_data=True,
)
else:
index_value = parse_index(
pd.RangeIndex(start=index_stop - in_chunk.shape[0], stop=index_stop)
)
out_op.extra_params["index_stop"] = index_stop
out_op.extra_params["column_stop"] = column_stop
out_chunk = out_op.new_chunk(
[in_chunk],
shape=in_chunk.shape,
index=index,
index_value=index_value,
columns_value=columns_value,
)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_dataframes(
out_df.inputs,
out_df.shape,
dtypes=out_df.dtypes,
index_value=out_df.index_value,
columns_value=out_df.columns,
chunks=out_chunks,
nsplits=in_tensor.nsplits,
)
|
https://github.com/mars-project/mars/issues/718
|
In [1]: import mars.dataframe as md
In [2]: import mars.tensor as mt
In [3]: df = md.DataFrame(mt.random.rand(10, 3), columns=list('abc'))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-8015891094de> in <module>
----> 1 df = md.DataFrame(mt.random.rand(10, 3), columns=list('abc'))
~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse)
29 chunk_size=None, gpu=None, sparse=None):
30 if isinstance(data, TENSOR_TYPE):
---> 31 df = from_tensor(data, index=index, columns=columns, gpu=gpu, sparse=sparse)
32 elif isinstance(data, DATAFRAME_TYPE):
33 df = data
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in from_tensor(tensor, index, columns, gpu, sparse)
135 gpu = tensor.op.gpu if gpu is None else gpu
136 op = DataFrameFromTensor(dtypes=pd.Series([tensor.dtype] * col_num), gpu=gpu, sparse=sparse)
--> 137 return op(tensor, index, columns)
~/Workspace/mars/mars/dataframe/datasource/from_tensor.py in __call__(self, input_tensor, index, columns)
51 def __call__(self, input_tensor, index, columns):
52 if index is not None or columns is not None:
---> 53 if input_tensor.shape != (len(index), len(columns)):
54 raise ValueError(
55 '({0},{1}) should have the same shape with tensor: {2}'.format(index, columns, input_tensor.shape))
TypeError: object of type 'NoneType' has no len()
|
TypeError
|
def execute_tileables(
self,
tileables,
fetch=True,
n_parallel=None,
n_thread=None,
print_progress=False,
mock=False,
compose=True,
):
graph = DirectedGraph()
result_keys = []
to_release_keys = []
concat_keys = []
for tileable in tileables:
tileable.tiles()
chunk_keys = [c.key for c in tileable.chunks]
result_keys.extend(chunk_keys)
if tileable.key in self.stored_tileables:
self.stored_tileables[tileable.key][0].add(tileable.id)
else:
self.stored_tileables[tileable.key] = tuple(
[{tileable.id}, set(chunk_keys)]
)
if not fetch:
# no need to generate concat keys
pass
elif len(tileable.chunks) > 1:
# if need to fetch data and chunks more than 1, we concatenate them into 1
tileable = concat_tileable_chunks(tileable)
chunk = tileable.chunks[0]
result_keys.append(chunk.key)
# the concatenated key
concat_keys.append(chunk.key)
# after return the data to user, we release the reference
to_release_keys.append(chunk.key)
else:
concat_keys.append(tileable.chunks[0].key)
# Do not do compose here, because building graph has not finished yet
tileable.build_graph(
graph=graph,
tiled=True,
compose=False,
executed_keys=list(self._chunk_result.keys()),
)
if compose:
# finally do compose according to option
graph.compose(
keys=list(itertools.chain(*[[c.key for c in t.chunks] for t in tileables]))
)
self.execute_graph(
graph,
result_keys,
n_parallel=n_parallel or n_thread,
print_progress=print_progress,
mock=mock,
)
results = self._chunk_result
try:
if fetch:
return [results[k] for k in concat_keys]
else:
return
finally:
for k in to_release_keys:
del results[k]
|
def execute_tileables(
self,
tileables,
fetch=True,
n_parallel=None,
n_thread=None,
print_progress=False,
mock=False,
compose=True,
):
graph = DirectedGraph()
result_keys = []
to_release_keys = []
concat_keys = []
for tileable in tileables:
tileable.tiles()
chunk_keys = [c.key for c in tileable.chunks]
result_keys.extend(chunk_keys)
if tileable.key in self.stored_tileables:
self.stored_tileables[tileable.key][0].add(tileable.id)
else:
self.stored_tileables[tileable.key] = tuple(
[{tileable.id}, set(chunk_keys)]
)
if not fetch:
# no need to generate concat keys
pass
elif len(tileable.chunks) > 1:
# if need to fetch data and chunks more than 1, we concatenate them into 1
tileable = concat_tileable_chunks(tileable)
chunk = tileable.chunks[0]
result_keys.append(chunk.key)
# the concatenated key
concat_keys.append(chunk.key)
# after return the data to user, we release the reference
to_release_keys.append(chunk.key)
else:
concat_keys.append(tileable.chunks[0].key)
tileable.build_graph(
graph=graph,
tiled=True,
compose=compose,
executed_keys=list(self._chunk_result.keys()),
)
self.execute_graph(
graph,
result_keys,
n_parallel=n_parallel or n_thread,
print_progress=print_progress,
mock=mock,
)
results = self._chunk_result
try:
if fetch:
return [results[k] for k in concat_keys]
else:
return
finally:
for k in to_release_keys:
del results[k]
|
https://github.com/mars-project/mars/issues/642
|
In [1]: import mars.tensor as mt
In [2]: a = mt.random.rand(4, 4) + 1 - 3
In [3]: u, s, v = mt.linalg.svd(a)
In [4]: (u + 1).execute()
Out[4]:
array([[0.53491564, 0.50844154, 1.31934217, 0.33660917],
[0.48043745, 0.58359388, 1.12894514, 1.73486995],
[0.54248881, 1.05235106, 0.12344775, 0.86000348],
[0.4482439 , 1.76304364, 1.33622891, 0.98327479]])
In [5]: (u + 2).execute()
---------------------------------------------------------------------------
InvalidComposedNodeError Traceback (most recent call last)
<ipython-input-5-1dd2365be547> in <module>
----> 1 (u + 2).execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
442
443 def execute(self, session=None, **kw):
--> 444 return self._data.execute(session, **kw)
445
446
~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw)
572 if session is None:
573 session = Session.default_or_local()
--> 574 return session.run(self, **kw)
575
576 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw)
150 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
151 for t in tileables)
--> 152 result = self._sess.run(*tileables, **kw)
153
154 for t in tileables:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw)
61 if 'n_parallel' not in kw:
62 kw['n_parallel'] = cpu_count()
---> 63 res = self._executor.execute_tileables(tileables, **kw)
64 return res
65
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
389 _kernel_mode.eager = False
390 _kernel_mode.eager_count = enter_eager_count + 1
--> 391 return func(*args, **kwargs)
392 finally:
393 _kernel_mode.eager_count -= 1
~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
548
549 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 550 print_progress=print_progress, mock=mock)
551
552 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval)
469 :return: execution result
470 """
--> 471 optimized_graph = self._preprocess(graph, keys) if compose else graph
472
473 if not mock:
~/Documents/mars_dev/mars/mars/executor.py in _preprocess(self, graph, keys)
435 from .optimizes.core import Optimizer
436
--> 437 Optimizer(graph, self._engine).optimize(keys=keys)
438 return graph
439
~/Documents/mars_dev/mars/mars/optimizes/core.py in optimize(self, keys)
32
33 def optimize(self, keys=None):
---> 34 self._graph.decompose()
35 if self._engine == 'numpy':
36 return
~/Documents/mars_dev/mars/mars/graph.pyx in mars.graph.DirectedGraph.decompose()
~/Documents/mars_dev/mars/mars/fuse.pyx in mars.fuse.Fusion.decompose()
~/Documents/mars_dev/mars/mars/fuse.pyx in mars.fuse.Fusion._decompose_node()
InvalidComposedNodeError: Invalid composed node data
|
InvalidComposedNodeError
|
def execute(cls, ctx, op):
(a, b), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
if not op.sparse and is_sparse_module(xp):
# tell sparse to do calculation on numpy or cupy matmul
ctx[op.outputs[0].key] = xp.matmul(a, b, sparse=False)
else:
try:
# `np.matmul` support `order` argument in version 1.16
ctx[op.outputs[0].key] = xp.matmul(
a, b, casting=op.casting, order=op.order
)
except TypeError: # pragma: no cover
ctx[op.outputs[0].key] = xp.matmul(a, b).astype(
dtype=op.dtype, casting=op.casting, order=op.order
)
|
def execute(cls, ctx, op):
(a, b), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True
)
with device(device_id):
if not op.sparse and is_sparse_module(xp):
# tell sparse to do calculation on numpy or cupy matmul
ctx[op.outputs[0].key] = xp.matmul(a, b, sparse=False)
else:
ctx[op.outputs[0].key] = xp.matmul(a, b, casting=op.casting, order=op.order)
|
https://github.com/mars-project/mars/issues/642
|
In [1]: import mars.tensor as mt
In [2]: a = mt.random.rand(4, 4) + 1 - 3
In [3]: u, s, v = mt.linalg.svd(a)
In [4]: (u + 1).execute()
Out[4]:
array([[0.53491564, 0.50844154, 1.31934217, 0.33660917],
[0.48043745, 0.58359388, 1.12894514, 1.73486995],
[0.54248881, 1.05235106, 0.12344775, 0.86000348],
[0.4482439 , 1.76304364, 1.33622891, 0.98327479]])
In [5]: (u + 2).execute()
---------------------------------------------------------------------------
InvalidComposedNodeError Traceback (most recent call last)
<ipython-input-5-1dd2365be547> in <module>
----> 1 (u + 2).execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
442
443 def execute(self, session=None, **kw):
--> 444 return self._data.execute(session, **kw)
445
446
~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw)
572 if session is None:
573 session = Session.default_or_local()
--> 574 return session.run(self, **kw)
575
576 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw)
150 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
151 for t in tileables)
--> 152 result = self._sess.run(*tileables, **kw)
153
154 for t in tileables:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw)
61 if 'n_parallel' not in kw:
62 kw['n_parallel'] = cpu_count()
---> 63 res = self._executor.execute_tileables(tileables, **kw)
64 return res
65
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
389 _kernel_mode.eager = False
390 _kernel_mode.eager_count = enter_eager_count + 1
--> 391 return func(*args, **kwargs)
392 finally:
393 _kernel_mode.eager_count -= 1
~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
548
549 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 550 print_progress=print_progress, mock=mock)
551
552 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval)
469 :return: execution result
470 """
--> 471 optimized_graph = self._preprocess(graph, keys) if compose else graph
472
473 if not mock:
~/Documents/mars_dev/mars/mars/executor.py in _preprocess(self, graph, keys)
435 from .optimizes.core import Optimizer
436
--> 437 Optimizer(graph, self._engine).optimize(keys=keys)
438 return graph
439
~/Documents/mars_dev/mars/mars/optimizes/core.py in optimize(self, keys)
32
33 def optimize(self, keys=None):
---> 34 self._graph.decompose()
35 if self._engine == 'numpy':
36 return
~/Documents/mars_dev/mars/mars/graph.pyx in mars.graph.DirectedGraph.decompose()
~/Documents/mars_dev/mars/mars/fuse.pyx in mars.fuse.Fusion.decompose()
~/Documents/mars_dev/mars/mars/fuse.pyx in mars.fuse.Fusion._decompose_node()
InvalidComposedNodeError: Invalid composed node data
|
InvalidComposedNodeError
|
def unify_nsplits(*tensor_axes):
from .rechunk import rechunk
tensor_splits = [
dict((a, split) for a, split in izip(axes, t.nsplits) if split != (1,))
for t, axes in tensor_axes
if t.nsplits
]
common_axes = (
reduce(operator.and_, [set(lkeys(ts)) for ts in tensor_splits])
if tensor_splits
else set()
)
axes_unified_splits = dict(
(ax, decide_unify_split(*(t[ax] for t in tensor_splits))) for ax in common_axes
)
if len(common_axes) == 0:
return tuple(t[0] for t in tensor_axes)
res = []
for t, axes in tensor_axes:
new_chunk = dict(
(i, axes_unified_splits[ax])
for ax, i in zip(axes, range(t.ndim))
if ax in axes_unified_splits
)
res.append(rechunk(t, new_chunk).single_tiles())
return tuple(res)
|
def unify_nsplits(*tensor_axes):
from .rechunk import rechunk
tensor_splits = [
dict((a, split) for a, split in izip(axes, t.nsplits) if split != (1,))
for t, axes in tensor_axes
]
common_axes = reduce(operator.and_, [set(lkeys(ts)) for ts in tensor_splits])
axes_unified_splits = dict(
(ax, decide_unify_split(*(t[ax] for t in tensor_splits))) for ax in common_axes
)
if len(common_axes) == 0:
return tuple(t[0] for t in tensor_axes)
res = []
for t, axes in tensor_axes:
new_chunk = dict(
(i, axes_unified_splits[ax])
for ax, i in zip(axes, range(t.ndim))
if ax in axes_unified_splits
)
res.append(rechunk(t, new_chunk).single_tiles())
return tuple(res)
|
https://github.com/mars-project/mars/issues/535
|
In [5]: import mars.tensor as mt
In [6]: a = mt.random.rand(1, 10, chunk_size=3)
In [7]: b = mt.add(a[:, :5], 1, out=a[:, 5:])
In [8]: b.execute()
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/Documents/mars_dev/mars/mars/tiles.py in _dispatch(self, op)
110 try:
--> 111 handler = self._handlers[op_cls]
112 return handler(op)
KeyError: <class 'mars.tensor.expressions.arithmetic.add.TensorAdd'>
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-8-be8c8c42b9b4> in <module>
----> 1 b.execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
394
395 def execute(self, session=None, **kw):
--> 396 return self._data.execute(session, **kw)
397
398
~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw)
553 if session is None:
554 session = Session.default_or_local()
--> 555 return session.run(self, **kw)
556
557 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw)
113 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t
114 for t in tileables)
--> 115 result = self._sess.run(*tileables, **kw)
116
117 for t in tileables:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw)
54 if 'n_parallel' not in kw:
55 kw['n_parallel'] = cpu_count()
---> 56 res = self._executor.execute_tileables(tileables, **kw)
57 return res
58
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
387 try:
388 _kernel_mode.eager = False
--> 389 return func(*args, **kwargs)
390 finally:
391 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose)
523 concat_keys = []
524 for tileable in tileables:
--> 525 tileable.tiles()
526 chunk_keys = [c.key for c in tileable.chunks]
527 result_keys.extend(chunk_keys)
~/Documents/mars_dev/mars/mars/core.py in tiles(self)
472
473 def tiles(self):
--> 474 return handler.tiles(self)
475
476 def single_tiles(self):
~/Documents/mars_dev/mars/mars/tiles.py in tiles(self, tiles_obj)
172 if not preds or accessible:
173 if node.is_coarse() and node.op:
--> 174 tiled = self._dispatch(node.op)
175 self._assign_to([t.data for t in tiled], node.op.outputs)
176 visited.add(node)
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
387 try:
388 _kernel_mode.eager = False
--> 389 return func(*args, **kwargs)
390 finally:
391 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/tiles.py in _dispatch(self, op)
114 if hasattr(op_cls, 'tile'):
115 # has tile implementation
--> 116 return op_cls.tile(op)
117 for op_clz in self._handlers.keys():
118 if issubclass(op_cls, op_clz):
~/Documents/mars_dev/mars/mars/tensor/expressions/arithmetic/core.py in tile(cls, op)
36
37 chunk_shapes = [t.chunk_shape for t in inputs]
---> 38 out_chunk_shape = broadcast_shape(*chunk_shapes)
39
40 out_chunks = [list() for _ in op.outputs]
~/Documents/mars_dev/mars/mars/tensor/expressions/utils.py in broadcast_shape(*shapes)
76 if any(i != -1 and i != 1 and i != shape and not np.isnan(i) for i in ss):
77 raise ValueError('Operands could not be broadcast together '
---> 78 'with shape {0}'.format(' '.join(map(str, shapes))))
79 out_shapes.append(shape)
80 return tuple(reversed(out_shapes))
ValueError: Operands could not be broadcast together with shape (1, 2) () (1, 3)
|
KeyError
|
def submit_graph(
self, session_id, serialized_graph, graph_key, target, compose=True, wait=True
):
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
session_ref.submit_tileable_graph(
serialized_graph, graph_key, target, compose=compose, _tell=not wait
)
|
def submit_graph(self, session_id, serialized_graph, graph_key, target, compose=True):
session_uid = SessionActor.gen_uid(session_id)
session_ref = self.get_actor_ref(session_uid)
session_ref.submit_tileable_graph(
serialized_graph, graph_key, target, compose=compose, _tell=True
)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def get_graph_state(self, session_id, graph_key):
from .scheduler import GraphState
graph_meta_ref = self.get_graph_meta_ref(session_id, graph_key)
if self.actor_client.has_actor(graph_meta_ref):
state_obj = graph_meta_ref.get_state()
state = state_obj.value if state_obj else "preparing"
else:
raise GraphNotExists
state = GraphState(state.lower())
return state
|
def get_graph_state(self, session_id, graph_key):
from .scheduler import GraphState
graph_meta_ref = self.get_graph_meta_ref(session_id, graph_key)
if self.actor_client.has_actor(graph_meta_ref):
state_obj = graph_meta_ref.get_state()
state = state_obj.value if state_obj else "preparing"
else:
state = "preparing"
state = GraphState(state.lower())
return state
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def calc_operand_assignments(self, op_keys, input_chunk_metas=None):
"""
Decide target worker for given chunks.
:param op_keys: keys of operands to assign
:param input_chunk_metas: chunk metas for graph-level inputs, grouped by initial chunks
:type input_chunk_metas: dict[str, dict[str, mars.scheduler.chunkmeta.WorkerMeta]]
:return: dict mapping operand keys into worker endpoints
"""
graph = self._graph
op_states = self._op_states
cur_assigns = OrderedDict(self._fixed_assigns)
key_to_chunks = defaultdict(list)
for n in graph:
key_to_chunks[n.op.key].append(n)
descendant_readies = set()
op_keys = set(op_keys)
chunks_to_assign = [key_to_chunks[k][0] for k in op_keys]
if any(graph.count_predecessors(c) for c in chunks_to_assign):
graph = graph.copy()
for c in graph:
if c.op.key not in op_keys:
continue
for pred in graph.predecessors(c):
graph.remove_edge(pred, c)
assigned_counts = defaultdict(lambda: 0)
worker_op_keys = defaultdict(set)
if cur_assigns:
for op_key, state in op_states.items():
if (
op_key not in op_keys
and state == OperandState.READY
and op_key in cur_assigns
):
descendant_readies.add(op_key)
assigned_counts[cur_assigns[op_key]] += 1
# calculate the number of nodes to be assigned to every worker
# given number of workers and existing assignments
pre_worker_quotas = self._calc_worker_assign_limits(
len(chunks_to_assign) + len(descendant_readies), assigned_counts
)
# pre-assign nodes given pre-determined transfer sizes
if not input_chunk_metas:
worker_quotas = pre_worker_quotas
else:
for op_key, worker in self._iter_assignments_by_transfer_sizes(
pre_worker_quotas, input_chunk_metas
):
if op_key in cur_assigns:
continue
assigned_counts[worker] += 1
cur_assigns[op_key] = worker
worker_op_keys[worker].add(op_key)
worker_quotas = self._calc_worker_assign_limits(
len(chunks_to_assign) + len(descendant_readies), assigned_counts
)
if cur_assigns:
# calculate ranges of nodes already assigned
for op_key, worker in self._iter_successor_assigns(cur_assigns):
cur_assigns[op_key] = worker
worker_op_keys[worker].add(op_key)
logger.debug("Worker assign quotas: %r", worker_quotas)
# calculate expected descendant count (spread range) of
# every worker and subtract assigned number from it
average_spread_range = len(graph) * 1.0 / len(self._worker_slots)
spread_ranges = defaultdict(lambda: average_spread_range)
for worker in cur_assigns.values():
spread_ranges[worker] -= 1
logger.debug("Scan spread ranges: %r", dict(spread_ranges))
# assign pass 1: assign from fixed groups
sorted_workers = sorted(
worker_op_keys, reverse=True, key=lambda k: len(worker_op_keys[k])
)
for worker in sorted_workers:
start_chunks = reduce(
operator.add, (key_to_chunks[op_key] for op_key in worker_op_keys[worker])
)
self._assign_by_bfs(
start_chunks,
worker,
worker_quotas,
spread_ranges,
op_keys,
cur_assigns,
graph=graph,
)
# assign pass 2: assign from other nodes to be assigned
sorted_candidates = [v for v in chunks_to_assign]
while max(worker_quotas.values()):
worker = max(worker_quotas, key=lambda k: worker_quotas[k])
cur = sorted_candidates.pop()
while cur.op.key in cur_assigns:
cur = sorted_candidates.pop()
self._assign_by_bfs(
cur, worker, worker_quotas, spread_ranges, op_keys, cur_assigns, graph=graph
)
keys_to_assign = set(n.op.key for n in chunks_to_assign)
return OrderedDict((k, v) for k, v in cur_assigns.items() if k in keys_to_assign)
|
def calc_operand_assignments(self, op_keys, input_chunk_metas=None):
"""
Decide target worker for given chunks.
:param op_keys: keys of operands to assign
:param input_chunk_metas: chunk metas for graph-level inputs, grouped by initial chunks
:type input_chunk_metas: dict[str, dict[str, mars.scheduler.chunkmeta.WorkerMeta]]
:return: dict mapping operand keys into worker endpoints
"""
graph = self._graph
op_states = self._op_states
cur_assigns = self._fixed_assigns.copy()
key_to_chunks = defaultdict(list)
for n in graph:
key_to_chunks[n.op.key].append(n)
descendant_readies = set()
op_keys = set(op_keys)
chunks_to_assign = [key_to_chunks[k][0] for k in op_keys]
if any(graph.count_predecessors(c) for c in chunks_to_assign):
graph = graph.copy()
for c in graph:
if c.op.key not in op_keys:
continue
for pred in graph.predecessors(c):
graph.remove_edge(pred, c)
assigned_counts = defaultdict(lambda: 0)
worker_op_keys = defaultdict(set)
if cur_assigns:
for op_key, state in op_states.items():
if (
op_key not in op_keys
and state == OperandState.READY
and op_key in cur_assigns
):
descendant_readies.add(op_key)
assigned_counts[cur_assigns[op_key]] += 1
# calculate the number of nodes to be assigned to every worker
# given number of workers and existing assignments
pre_worker_quotas = self._calc_worker_assign_limits(
len(chunks_to_assign) + len(descendant_readies), assigned_counts
)
# pre-assign nodes given pre-determined transfer sizes
if not input_chunk_metas:
worker_quotas = pre_worker_quotas
else:
for op_key, worker in self._iter_assignments_by_transfer_sizes(
pre_worker_quotas, input_chunk_metas
):
if op_key in cur_assigns:
continue
assigned_counts[worker] += 1
cur_assigns[op_key] = worker
worker_op_keys[worker].add(op_key)
worker_quotas = self._calc_worker_assign_limits(
len(chunks_to_assign) + len(descendant_readies), assigned_counts
)
if cur_assigns:
# calculate ranges of nodes already assigned
for op_key, worker in self._iter_successor_assigns(cur_assigns):
cur_assigns[op_key] = worker
worker_op_keys[worker].add(op_key)
logger.debug("Worker assign quotas: %r", worker_quotas)
# calculate expected descendant count (spread range) of
# every worker and subtract assigned number from it
average_spread_range = len(graph) * 1.0 / len(self._worker_slots)
spread_ranges = defaultdict(lambda: average_spread_range)
for worker in cur_assigns.values():
spread_ranges[worker] -= 1
logger.debug("Scan spread ranges: %r", dict(spread_ranges))
# assign pass 1: assign from fixed groups
sorted_workers = sorted(
worker_op_keys, reverse=True, key=lambda k: len(worker_op_keys[k])
)
for worker in sorted_workers:
start_chunks = reduce(
operator.add, (key_to_chunks[op_key] for op_key in worker_op_keys[worker])
)
self._assign_by_bfs(
start_chunks,
worker,
worker_quotas,
spread_ranges,
op_keys,
cur_assigns,
graph=graph,
)
# assign pass 2: assign from other nodes to be assigned
sorted_candidates = [v for v in chunks_to_assign]
while max(worker_quotas.values()):
worker = max(worker_quotas, key=lambda k: worker_quotas[k])
cur = sorted_candidates.pop()
while cur.op.key in cur_assigns:
cur = sorted_candidates.pop()
self._assign_by_bfs(
cur, worker, worker_quotas, spread_ranges, op_keys, cur_assigns, graph=graph
)
return dict((n.op.key, cur_assigns[n.op.key]) for n in chunks_to_assign)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def assign_operand_workers(self, op_keys, input_chunk_metas=None, analyzer=None):
operand_infos = self._operand_infos
chunk_graph = self.get_chunk_graph()
if analyzer is None:
analyzer = GraphAnalyzer(chunk_graph, self._get_worker_slots())
assignments = analyzer.calc_operand_assignments(
op_keys, input_chunk_metas=input_chunk_metas
)
for idx, (k, v) in enumerate(assignments.items()):
operand_infos[k]["optimize"]["placement_order"] = idx
operand_infos[k]["target_worker"] = v
return assignments
|
def assign_operand_workers(self, op_keys, input_chunk_metas=None, analyzer=None):
operand_infos = self._operand_infos
chunk_graph = self.get_chunk_graph()
if analyzer is None:
analyzer = GraphAnalyzer(chunk_graph, self._get_worker_slots())
assignments = analyzer.calc_operand_assignments(
op_keys, input_chunk_metas=input_chunk_metas
)
for k, v in assignments.items():
operand_infos[k]["target_worker"] = v
return assignments
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def handle_worker_change(self, adds, removes, lost_chunks, handle_later=True):
"""
Calculate and propose changes of operand states given changes
in workers and lost chunks.
:param adds: endpoints of workers newly added to the cluster
:param removes: endpoints of workers removed to the cluster
:param lost_chunks: keys of lost chunks
:param handle_later: run the function later, only used in this actor
"""
if self._state in GraphState.TERMINATED_STATES:
return
if handle_later:
# Run the fail-over process later.
# This is the default behavior as we need to make sure that
# all crucial state changes are received by GraphActor.
# During the delay, no operands are allowed to be freed.
self._operand_free_paused = True
self._worker_adds.update(adds)
self._worker_removes.update(removes)
self.ref().handle_worker_change(
adds, removes, lost_chunks, handle_later=False, _delay=0.5, _tell=True
)
return
else:
self._operand_free_paused = False
adds = self._worker_adds
self._worker_adds = set()
removes = self._worker_removes
self._worker_removes = set()
if not adds and not removes:
return
if all(ep in self._assigned_workers for ep in adds) and not any(
ep in self._assigned_workers for ep in removes
):
return
worker_slots = self._get_worker_slots()
self._assigned_workers = set(worker_slots)
removes_set = set(removes)
# collect operand states
operand_infos = self._operand_infos
fixed_assigns = dict()
graph_states = dict()
for key, op_info in operand_infos.items():
op_worker = op_info.get("worker")
if op_worker is None:
continue
op_state = graph_states[key] = op_info["state"]
# RUNNING nodes on dead workers should be moved to READY first
if op_state == OperandState.RUNNING and op_worker in removes_set:
graph_states[key] = OperandState.READY
if op_worker in worker_slots:
fixed_assigns[key] = op_info["worker"]
graph = self.get_chunk_graph()
new_states = dict()
ordered_states = OrderedDict(
sorted(
((k, v) for k, v in graph_states.items()),
key=lambda d: operand_infos[d[0]]["optimize"].get("placement_order", 0),
)
)
analyzer = GraphAnalyzer(
graph, worker_slots, fixed_assigns, ordered_states, lost_chunks
)
if removes or lost_chunks:
new_states = analyzer.analyze_state_changes()
logger.debug(
"%d chunks lost. %d operands changed state.",
len(lost_chunks),
len(new_states),
)
logger.debug("Start reallocating initial operands")
new_targets = dict(self._assign_initial_workers(analyzer))
futures = []
# make sure that all readies and runnings are included to be checked
for key, op_info in operand_infos.items():
if key in new_states:
continue
state = op_info["state"]
if (
state == OperandState.RUNNING
and operand_infos[key]["worker"] not in removes_set
):
continue
if state in (OperandState.READY, OperandState.RUNNING):
new_states[key] = state
for key, state in new_states.items():
new_target = new_targets.get(key)
op_info = operand_infos[key]
from_state = op_info["state"]
# record the target state in special info key
# in case of concurrency issues
op_info["failover_state"] = state
op_ref = self._get_operand_ref(key)
# states may easily slip into the next state when we are
# calculating fail-over states. Hence we need to include them
# into source states.
if from_state == OperandState.READY:
from_states = [from_state, OperandState.RUNNING]
elif from_state == OperandState.RUNNING:
from_states = [from_state, OperandState.FINISHED]
elif from_state == OperandState.FINISHED:
from_states = [from_state, OperandState.FREED]
else:
from_states = [from_state]
futures.append(
op_ref.move_failover_state(
from_states, state, new_target, removes, _tell=True, _wait=False
)
)
[f.result() for f in futures]
self._dump_failover_info(adds, removes, lost_chunks, new_states)
|
def handle_worker_change(self, adds, removes, lost_chunks, handle_later=True):
"""
Calculate and propose changes of operand states given changes
in workers and lost chunks.
:param adds: endpoints of workers newly added to the cluster
:param removes: endpoints of workers removed to the cluster
:param lost_chunks: keys of lost chunks
:param handle_later: run the function later, only used in this actor
"""
if self._state in GraphState.TERMINATED_STATES:
return
if handle_later:
# Run the fail-over process later.
# This is the default behavior as we need to make sure that
# all crucial state changes are received by GraphActor.
# During the delay, no operands are allowed to be freed.
self._operand_free_paused = True
self._worker_adds.update(adds)
self._worker_removes.update(removes)
self.ref().handle_worker_change(
adds, removes, lost_chunks, handle_later=False, _delay=0.5, _tell=True
)
return
else:
self._operand_free_paused = False
adds = self._worker_adds
self._worker_adds = set()
removes = self._worker_removes
self._worker_removes = set()
if not adds and not removes:
return
if all(ep in self._assigned_workers for ep in adds) and not any(
ep in self._assigned_workers for ep in removes
):
return
worker_slots = self._get_worker_slots()
self._assigned_workers = set(worker_slots)
removes_set = set(removes)
# collect operand states
operand_infos = self._operand_infos
fixed_assigns = dict()
graph_states = dict()
for key, op_info in operand_infos.items():
op_worker = op_info.get("worker")
if op_worker is None:
continue
op_state = graph_states[key] = op_info["state"]
# RUNNING nodes on dead workers should be moved to READY first
if op_state == OperandState.RUNNING and op_worker in removes_set:
graph_states[key] = OperandState.READY
if op_worker in worker_slots:
fixed_assigns[key] = op_info["worker"]
graph = self.get_chunk_graph()
new_states = dict()
analyzer = GraphAnalyzer(
graph, worker_slots, fixed_assigns, graph_states, lost_chunks
)
if removes or lost_chunks:
new_states = analyzer.analyze_state_changes()
logger.debug(
"%d chunks lost. %d operands changed state.",
len(lost_chunks),
len(new_states),
)
logger.debug("Start reallocating initial operands")
new_targets = dict(self._assign_initial_workers(analyzer))
futures = []
# make sure that all readies and runnings are included to be checked
for key, op_info in operand_infos.items():
if key in new_states:
continue
state = op_info["state"]
if (
state == OperandState.RUNNING
and operand_infos[key]["worker"] not in removes_set
):
continue
if state in (OperandState.READY, OperandState.RUNNING):
new_states[key] = state
for key, state in new_states.items():
new_target = new_targets.get(key)
op_info = operand_infos[key]
from_state = op_info["state"]
# record the target state in special info key
# in case of concurrency issues
op_info["failover_state"] = state
op_ref = self._get_operand_ref(key)
# states may easily slip into the next state when we are
# calculating fail-over states. Hence we need to include them
# into source states.
if from_state == OperandState.READY:
from_states = [from_state, OperandState.RUNNING]
elif from_state == OperandState.RUNNING:
from_states = [from_state, OperandState.FINISHED]
elif from_state == OperandState.FINISHED:
from_states = [from_state, OperandState.FREED]
else:
from_states = [from_state]
futures.append(
op_ref.move_failover_state(
from_states, state, new_target, removes, _tell=True, _wait=False
)
)
[f.result() for f in futures]
self._dump_failover_info(adds, removes, lost_chunks, new_states)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def get(self, session_id, graph_key):
from ..scheduler.utils import GraphState
try:
state = self.web_api.get_graph_state(session_id, graph_key)
except GraphNotExists:
raise web.HTTPError(404, "Graph not exists")
if state == GraphState.RUNNING:
self.write(json.dumps(dict(state="running")))
elif state == GraphState.SUCCEEDED:
self.write(json.dumps(dict(state="success")))
elif state == GraphState.FAILED:
self.write(json.dumps(dict(state="failed")))
elif state == GraphState.CANCELLED:
self.write(json.dumps(dict(state="cancelled")))
elif state == GraphState.CANCELLING:
self.write(json.dumps(dict(state="cancelling")))
elif state == GraphState.PREPARING:
self.write(json.dumps(dict(state="preparing")))
|
def get(self, session_id, graph_key):
from ..scheduler.utils import GraphState
state = self.web_api.get_graph_state(session_id, graph_key)
if state == GraphState.RUNNING:
self.write(json.dumps(dict(state="running")))
elif state == GraphState.SUCCEEDED:
self.write(json.dumps(dict(state="success")))
elif state == GraphState.FAILED:
self.write(json.dumps(dict(state="failed")))
elif state == GraphState.CANCELLED:
self.write(json.dumps(dict(state="cancelled")))
elif state == GraphState.CANCELLING:
self.write(json.dumps(dict(state="cancelling")))
elif state == GraphState.PREPARING:
self.write(json.dumps(dict(state="preparing")))
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.