after_merge
stringlengths
28
79.6k
before_merge
stringlengths
20
79.6k
url
stringlengths
38
71
full_traceback
stringlengths
43
922k
traceback_type
stringclasses
555 values
def _update_tileable_and_its_chunk_shapes(self): need_update_tileable_to_tiled = dict() for tileable in self._chunk_graph_builder.prev_tileable_graph: if tileable.key in self._target_tileable_finished: tiled = self._tileable_key_opid_to_tiled[tileable.key, tileable.op.id][-1] if not has_unknown_shape(tiled): continue need_update_tileable_to_tiled[tileable] = tiled if len(need_update_tileable_to_tiled) == 0: return need_update_chunks = list( c for t in need_update_tileable_to_tiled.values() for c in t.chunks ) chunk_metas = self.chunk_meta.batch_get_chunk_meta( self._session_id, list(c.key for c in need_update_chunks) ) ops_to_restart = set() keys = [] for chunk, chunk_meta in zip(need_update_chunks, chunk_metas): if chunk_meta is None: ops_to_restart.add(chunk.op.key) keys.append(chunk.key) else: chunk.data._shape = chunk_meta.chunk_shape if ops_to_restart: for op_key in ops_to_restart: self._get_operand_ref(op_key).start_operand( OperandState.READY, _tell=True, _wait=False ) raise RuntimeError( f"Cannot find chunks {keys}. Operands {ops_to_restart} restarted." ) for tileable, tiled in need_update_tileable_to_tiled.items(): chunk_idx_to_shape = OrderedDict((c.index, c.shape) for c in tiled.chunks) nsplits = calc_nsplits(chunk_idx_to_shape) tiled._nsplits = nsplits if any(np.isnan(s) for s in tileable.shape): shape = tuple(sum(ns) for ns in nsplits) tileable._update_shape(shape) tiled._update_shape(shape)
def _update_tileable_and_its_chunk_shapes(self): need_update_tileable_to_tiled = dict() for tileable in self._chunk_graph_builder.prev_tileable_graph: if tileable.key in self._target_tileable_finished: tiled = self._tileable_key_opid_to_tiled[tileable.key, tileable.op.id][-1] if not has_unknown_shape(tiled): continue need_update_tileable_to_tiled[tileable] = tiled if len(need_update_tileable_to_tiled) == 0: return need_update_chunks = list( c for t in need_update_tileable_to_tiled.values() for c in t.chunks ) chunk_metas = self.chunk_meta.batch_get_chunk_meta( self._session_id, list(c.key for c in need_update_chunks) ) for chunk, chunk_meta in zip(need_update_chunks, chunk_metas): chunk.data._shape = chunk_meta.chunk_shape for tileable, tiled in need_update_tileable_to_tiled.items(): chunk_idx_to_shape = OrderedDict((c.index, c.shape) for c in tiled.chunks) nsplits = calc_nsplits(chunk_idx_to_shape) tiled._nsplits = nsplits if any(np.isnan(s) for s in tileable.shape): shape = tuple(sum(ns) for ns in nsplits) tileable._update_shape(shape) tiled._update_shape(shape)
https://github.com/mars-project/mars/issues/1741
2020-12-02 11:19:40,309 mars.scheduler.operands.common 87 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 5c7a3b06d448300987640036d2f5a34e in 11.238.145.234:49708 Traceback (most recent call last): File "/home/admin/work/_public-mars-0.5.5.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/_public-mars-0.5.5.zip/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 564, in execute_graph quota_request = self._prepare_quota_request(session_id, graph_key) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 249, in _prepare_quota_request memory_estimations = self._estimate_calc_memory(session_id, graph_key) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 213, in _estimate_calc_memory res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True) File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 690, in execute_graph res = graph_execution.execute(retval) File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 574, in execute return [self._chunk_results[key] for key in self._keys] File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 574, in <listcomp> return [self._chunk_results[key] for key in self._keys] KeyError: '3990ec90331559138b6ecbc6d76fbd0d'
KeyError
def append_graph(self, graph_key, op_info): super().append_graph(graph_key, op_info) if not self._is_terminal: self._is_terminal = op_info.get("is_terminal") if self.state in OperandState.STORED_STATES: metas = self.chunk_meta.batch_get_chunk_meta( self._session_id, self._io_meta.get("chunks") ) if any(meta is None for meta in metas): self.state = OperandState.UNSCHEDULED if self.state not in OperandState.TERMINATED_STATES: for in_key in self._pred_keys: self._get_operand_actor(in_key).remove_finished_successor( self._op_key, _tell=True, _wait=False ) self.start_operand() elif self.state in OperandState.STORED_STATES: for out_key in self._succ_keys: self._get_operand_actor(out_key).add_finished_predecessor( self._op_key, self.worker, output_sizes=self._data_sizes, output_shapes=self._data_shapes, _tell=True, _wait=False, ) # require more chunks to execute if the completion caused no successors to run if self._is_terminal: # update records in GraphActor to help decide if the whole graph finished execution self._add_finished_terminal()
def append_graph(self, graph_key, op_info): super().append_graph(graph_key, op_info) if not self._is_terminal: self._is_terminal = op_info.get("is_terminal") if self.state not in OperandState.TERMINATED_STATES: for in_key in self._pred_keys: self._get_operand_actor(in_key).remove_finished_successor( self._op_key, _tell=True, _wait=False ) self.start_operand() elif self.state in OperandState.STORED_STATES: for out_key in self._succ_keys: self._get_operand_actor(out_key).add_finished_predecessor( self._op_key, self.worker, output_sizes=self._data_sizes, output_shapes=self._data_shapes, _tell=True, _wait=False, ) # require more chunks to execute if the completion caused no successors to run if self._is_terminal: # update records in GraphActor to help decide if the whole graph finished execution self._add_finished_terminal()
https://github.com/mars-project/mars/issues/1741
2020-12-02 11:19:40,309 mars.scheduler.operands.common 87 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 5c7a3b06d448300987640036d2f5a34e in 11.238.145.234:49708 Traceback (most recent call last): File "/home/admin/work/_public-mars-0.5.5.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/_public-mars-0.5.5.zip/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 564, in execute_graph quota_request = self._prepare_quota_request(session_id, graph_key) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 249, in _prepare_quota_request memory_estimations = self._estimate_calc_memory(session_id, graph_key) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 213, in _estimate_calc_memory res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True) File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 690, in execute_graph res = graph_execution.execute(retval) File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 574, in execute return [self._chunk_results[key] for key in self._keys] File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 574, in <listcomp> return [self._chunk_results[key] for key in self._keys] KeyError: '3990ec90331559138b6ecbc6d76fbd0d'
KeyError
def create_reader( self, session_id, data_key, source_devices, packed=False, packed_compression=None, _promise=True, ): """ Create a data reader from existing data and return in a Promise. If no readers can be created, will try copying the data into a readable storage. :param session_id: session id :param data_key: data key :param source_devices: devices to read from :param packed: create a reader to read packed data format :param packed_compression: compression format to use when reading as packed :param _promise: return a promise """ source_devices = self._normalize_devices(source_devices) stored_devs = set(self._manager_ref.get_data_locations(session_id, [data_key])[0]) for src_dev in source_devices: if src_dev not in stored_devs: continue handler = self.get_storage_handler(src_dev) try: logger.debug( "Creating %s reader for (%s, %s) on %s", "packed" if packed else "bytes", session_id, data_key, handler.storage_type, ) return handler.create_bytes_reader( session_id, data_key, packed=packed, packed_compression=packed_compression, _promise=_promise, ) except AttributeError: # pragma: no cover raise IOError(f"Device {src_dev} does not support direct reading.") if _promise: return self.copy_to(session_id, [data_key], source_devices).then( lambda *_: self.create_reader( session_id, data_key, source_devices, packed=packed ) ) else: raise IOError( f"Cannot return a non-promise result for key {data_key}, stored_devs {stored_devs!r}" )
def create_reader( self, session_id, data_key, source_devices, packed=False, packed_compression=None, _promise=True, ): """ Create a data reader from existing data and return in a Promise. If no readers can be created, will try copying the data into a readable storage. :param session_id: session id :param data_key: data key :param source_devices: devices to read from :param packed: create a reader to read packed data format :param packed_compression: compression format to use when reading as packed :param _promise: return a promise """ source_devices = self._normalize_devices(source_devices) stored_devs = set(self._manager_ref.get_data_locations(session_id, [data_key])[0]) for src_dev in source_devices: if src_dev not in stored_devs: continue handler = self.get_storage_handler(src_dev) try: logger.debug( "Creating %s reader for (%s, %s) on %s", "packed" if packed else "bytes", session_id, data_key, handler.storage_type, ) return handler.create_bytes_reader( session_id, data_key, packed=packed, packed_compression=packed_compression, _promise=_promise, ) except AttributeError: # pragma: no cover raise IOError(f"Device {src_dev} does not support direct reading.") if _promise: return self.copy_to(session_id, [data_key], source_devices).then( lambda *_: self.create_reader( session_id, data_key, source_devices, packed=packed ) ) else: raise IOError("Cannot return a non-promise result")
https://github.com/mars-project/mars/issues/1741
2020-12-02 11:19:40,309 mars.scheduler.operands.common 87 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 5c7a3b06d448300987640036d2f5a34e in 11.238.145.234:49708 Traceback (most recent call last): File "/home/admin/work/_public-mars-0.5.5.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/_public-mars-0.5.5.zip/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 564, in execute_graph quota_request = self._prepare_quota_request(session_id, graph_key) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 249, in _prepare_quota_request memory_estimations = self._estimate_calc_memory(session_id, graph_key) File "/home/admin/work/_public-mars-0.5.5.zip/mars/worker/execution.py", line 213, in _estimate_calc_memory res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True) File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 690, in execute_graph res = graph_execution.execute(retval) File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 574, in execute return [self._chunk_results[key] for key in self._keys] File "/home/admin/work/_public-mars-0.5.5.zip/mars/executor.py", line 574, in <listcomp> return [self._chunk_results[key] for key in self._keys] KeyError: '3990ec90331559138b6ecbc6d76fbd0d'
KeyError
def execute(cls, ctx, op): def _base_concat(chunk, inputs): # auto generated concat when executing a DataFrame, Series or Index if chunk.op.output_types[0] == OutputType.dataframe: return _auto_concat_dataframe_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.series: return _auto_concat_series_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.index: return _auto_concat_index_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.categorical: return _auto_concat_categorical_chunks(chunk, inputs) else: # pragma: no cover raise TypeError( "Only DataFrameChunk, SeriesChunk, IndexChunk, " "and CategoricalChunk can be automatically concatenated" ) def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: n_rows = len(set(inp.index[0] for inp in chunk.inputs)) n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index def _auto_concat_categorical_chunks(_, inputs): if len(inputs) == 1: # pragma: no cover return inputs[0] else: # convert categorical into array arrays = [np.asarray(inp) for inp in inputs] array = np.concatenate(arrays) return pd.Categorical( array, categories=inputs[0].categories, ordered=inputs[0].ordered ) chunk = op.outputs[0] inputs = [ctx[input.key] for input in op.inputs] if isinstance(inputs[0], tuple): ctx[chunk.key] = tuple( _base_concat(chunk, [input[i] for input in inputs]) for i in range(len(inputs[0])) ) else: ctx[chunk.key] = _base_concat(chunk, inputs)
def execute(cls, ctx, op): def _base_concat(chunk, inputs): # auto generated concat when executing a DataFrame, Series or Index if chunk.op.output_types[0] == OutputType.dataframe: return _auto_concat_dataframe_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.series: return _auto_concat_series_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.index: return _auto_concat_index_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.categorical: return _auto_concat_categorical_chunks(chunk, inputs) else: # pragma: no cover raise TypeError( "Only DataFrameChunk, SeriesChunk, IndexChunk, " "and CategoricalChunk can be automatically concatenated" ) def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index def _auto_concat_categorical_chunks(_, inputs): if len(inputs) == 1: # pragma: no cover return inputs[0] else: # convert categorical into array arrays = [np.asarray(inp) for inp in inputs] array = np.concatenate(arrays) return pd.Categorical( array, categories=inputs[0].categories, ordered=inputs[0].ordered ) chunk = op.outputs[0] inputs = [ctx[input.key] for input in op.inputs] if isinstance(inputs[0], tuple): ctx[chunk.key] = tuple( _base_concat(chunk, [input[i] for input in inputs]) for i in range(len(inputs[0])) ) else: ctx[chunk.key] = _base_concat(chunk, inputs)
https://github.com/mars-project/mars/issues/1740
Error Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 646, in prepare_graph cur_chunk_graph = chunk_graph_builder.build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 347, in build chunk_graph = super().build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/Users/wenjun.swj/Code/mars/mars/context.py", line 72, in h return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 257, in tile return cls._tile_single_output(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 227, in _tile_single_output data_chunk = concat_chunks(chunks[0]) File "/Users/wenjun.swj/Code/mars/mars/learn/utils/core.py", line 33, in concat_chunks tileable = chunks[0].op.create_tileable_from_chunks(chunks) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 212, in create_tileable_from_chunks params = cls._calc_dataframe_params(chunk_index_to_chunk, chunk_shape) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in _calc_dataframe_params pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in <listcomp> pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() KeyError: (2, 0) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/tests/integrated/test_distributed_xgboost.py", line 71, in testDistributedXGBClassifier classifier.fit(X, y, eval_set=[(X, y)], session=sess, run_kwargs=run_kwargs) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/classifier.py", line 55, in fit result = train(params, dtrain, num_boost_round=self.get_num_boosting_rounds(), File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/train.py", line 200, in train ret = t.execute(session=session, **run_kwargs).fetch(session=session) File "/Users/wenjun.swj/Code/mars/mars/core.py", line 379, in execute return run() File "/Users/wenjun.swj/Code/mars/mars/core.py", line 374, in run session.run(self, **kw) File "/Users/wenjun.swj/Code/mars/mars/session.py", line 499, in run result = self._sess.run(*tileables, **kw) File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 214, in run if self._check_response_finished(graph_url, timeout_val): File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 174, in _check_response_finished raise ExecutionFailed('Graph execution failed.') from exc mars.errors.ExecutionFailed: 'Graph execution failed.'
KeyError
def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: n_rows = len(set(inp.index[0] for inp in chunk.inputs)) n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret
def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret
https://github.com/mars-project/mars/issues/1740
Error Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 646, in prepare_graph cur_chunk_graph = chunk_graph_builder.build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 347, in build chunk_graph = super().build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/Users/wenjun.swj/Code/mars/mars/context.py", line 72, in h return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 257, in tile return cls._tile_single_output(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 227, in _tile_single_output data_chunk = concat_chunks(chunks[0]) File "/Users/wenjun.swj/Code/mars/mars/learn/utils/core.py", line 33, in concat_chunks tileable = chunks[0].op.create_tileable_from_chunks(chunks) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 212, in create_tileable_from_chunks params = cls._calc_dataframe_params(chunk_index_to_chunk, chunk_shape) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in _calc_dataframe_params pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in <listcomp> pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() KeyError: (2, 0) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/tests/integrated/test_distributed_xgboost.py", line 71, in testDistributedXGBClassifier classifier.fit(X, y, eval_set=[(X, y)], session=sess, run_kwargs=run_kwargs) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/classifier.py", line 55, in fit result = train(params, dtrain, num_boost_round=self.get_num_boosting_rounds(), File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/train.py", line 200, in train ret = t.execute(session=session, **run_kwargs).fetch(session=session) File "/Users/wenjun.swj/Code/mars/mars/core.py", line 379, in execute return run() File "/Users/wenjun.swj/Code/mars/mars/core.py", line 374, in run session.run(self, **kw) File "/Users/wenjun.swj/Code/mars/mars/session.py", line 499, in run result = self._sess.run(*tileables, **kw) File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 214, in run if self._check_response_finished(graph_url, timeout_val): File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 174, in _check_response_finished raise ExecutionFailed('Graph execution failed.') from exc mars.errors.ExecutionFailed: 'Graph execution failed.'
KeyError
def _calc_dataframe_params(cls, chunk_index_to_chunks, chunk_shape): dtypes = pd.concat( [ chunk_index_to_chunks[0, i].dtypes for i in range(chunk_shape[1]) if (0, i) in chunk_index_to_chunks ] ) columns_value = parse_index(dtypes.index, store_data=True) pd_indexes = [ chunk_index_to_chunks[i, 0].index_value.to_pandas() for i in range(chunk_shape[0]) if (i, 0) in chunk_index_to_chunks ] pd_index = reduce(lambda x, y: x.append(y), pd_indexes) index_value = parse_index(pd_index) return { "dtypes": dtypes, "columns_value": columns_value, "index_value": index_value, }
def _calc_dataframe_params(cls, chunk_index_to_chunks, chunk_shape): dtypes = pd.concat( [chunk_index_to_chunks[0, i].dtypes for i in range(chunk_shape[1])] ) columns_value = parse_index(dtypes.index, store_data=True) pd_indxes = [ chunk_index_to_chunks[i, 0].index_value.to_pandas() for i in range(chunk_shape[0]) ] pd_index = reduce(lambda x, y: x.append(y), pd_indxes) index_value = parse_index(pd_index) return { "dtypes": dtypes, "columns_value": columns_value, "index_value": index_value, }
https://github.com/mars-project/mars/issues/1740
Error Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 646, in prepare_graph cur_chunk_graph = chunk_graph_builder.build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 347, in build chunk_graph = super().build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/Users/wenjun.swj/Code/mars/mars/context.py", line 72, in h return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 257, in tile return cls._tile_single_output(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 227, in _tile_single_output data_chunk = concat_chunks(chunks[0]) File "/Users/wenjun.swj/Code/mars/mars/learn/utils/core.py", line 33, in concat_chunks tileable = chunks[0].op.create_tileable_from_chunks(chunks) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 212, in create_tileable_from_chunks params = cls._calc_dataframe_params(chunk_index_to_chunk, chunk_shape) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in _calc_dataframe_params pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in <listcomp> pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() KeyError: (2, 0) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/tests/integrated/test_distributed_xgboost.py", line 71, in testDistributedXGBClassifier classifier.fit(X, y, eval_set=[(X, y)], session=sess, run_kwargs=run_kwargs) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/classifier.py", line 55, in fit result = train(params, dtrain, num_boost_round=self.get_num_boosting_rounds(), File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/train.py", line 200, in train ret = t.execute(session=session, **run_kwargs).fetch(session=session) File "/Users/wenjun.swj/Code/mars/mars/core.py", line 379, in execute return run() File "/Users/wenjun.swj/Code/mars/mars/core.py", line 374, in run session.run(self, **kw) File "/Users/wenjun.swj/Code/mars/mars/session.py", line 499, in run result = self._sess.run(*tileables, **kw) File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 214, in run if self._check_response_finished(graph_url, timeout_val): File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 174, in _check_response_finished raise ExecutionFailed('Graph execution failed.') from exc mars.errors.ExecutionFailed: 'Graph execution failed.'
KeyError
def parse_args(self, parser, argv, environ=None): args = super().parse_args(parser, argv) environ = environ or os.environ args.disable_failover = args.disable_failover or bool( int(environ.get("MARS_DISABLE_FAILOVER", "0")) ) options.scheduler.dump_graph_data = bool( int(environ.get("MARS_DUMP_GRAPH_DATA", "0")) ) return args
def parse_args(self, parser, argv, environ=None): args = super().parse_args(parser, argv) environ = environ or os.environ args.disable_failover = args.disable_failover or bool( int(environ.get("MARS_DISABLE_FAILOVER", "0")) ) return args
https://github.com/mars-project/mars/issues/1740
Error Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 646, in prepare_graph cur_chunk_graph = chunk_graph_builder.build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 347, in build chunk_graph = super().build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/Users/wenjun.swj/Code/mars/mars/context.py", line 72, in h return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 257, in tile return cls._tile_single_output(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 227, in _tile_single_output data_chunk = concat_chunks(chunks[0]) File "/Users/wenjun.swj/Code/mars/mars/learn/utils/core.py", line 33, in concat_chunks tileable = chunks[0].op.create_tileable_from_chunks(chunks) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 212, in create_tileable_from_chunks params = cls._calc_dataframe_params(chunk_index_to_chunk, chunk_shape) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in _calc_dataframe_params pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in <listcomp> pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() KeyError: (2, 0) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/tests/integrated/test_distributed_xgboost.py", line 71, in testDistributedXGBClassifier classifier.fit(X, y, eval_set=[(X, y)], session=sess, run_kwargs=run_kwargs) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/classifier.py", line 55, in fit result = train(params, dtrain, num_boost_round=self.get_num_boosting_rounds(), File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/train.py", line 200, in train ret = t.execute(session=session, **run_kwargs).fetch(session=session) File "/Users/wenjun.swj/Code/mars/mars/core.py", line 379, in execute return run() File "/Users/wenjun.swj/Code/mars/mars/core.py", line 374, in run session.run(self, **kw) File "/Users/wenjun.swj/Code/mars/mars/session.py", line 499, in run result = self._sess.run(*tileables, **kw) File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 214, in run if self._check_response_finished(graph_url, timeout_val): File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 174, in _check_response_finished raise ExecutionFailed('Graph execution failed.') from exc mars.errors.ExecutionFailed: 'Graph execution failed.'
KeyError
def add_finished_predecessor( self, op_key, worker, output_sizes=None, output_shapes=None ): super().add_finished_predecessor( op_key, worker, output_sizes=output_sizes, output_shapes=output_shapes ) from ..chunkmeta import WorkerMeta chunk_key = next(iter(output_sizes.keys()))[0] self._mapper_op_to_chunk[op_key] = chunk_key if op_key not in self._worker_to_mappers[worker]: self._worker_to_mappers[worker].add(op_key) self.chunk_meta.add_worker(self._session_id, chunk_key, worker, _tell=True) shuffle_keys_to_op = self._shuffle_keys_to_op if not self._reducer_workers: self._reducer_workers = self._graph_refs[0].assign_operand_workers( self._succ_keys, input_chunk_metas=self._reducer_to_mapper ) reducer_workers = self._reducer_workers data_to_addresses = dict() unused_keys = [] for (chunk_key, shuffle_key), data_size in output_sizes.items() or (): if shuffle_key not in shuffle_keys_to_op: # outputs may be pruned, hence those keys become useless unused_keys.append((chunk_key, shuffle_key)) continue succ_op_key = shuffle_keys_to_op[shuffle_key] meta = self._reducer_to_mapper[succ_op_key][op_key] = WorkerMeta( chunk_size=data_size, workers=(worker,), chunk_shape=output_shapes.get((chunk_key, shuffle_key)), ) reducer_worker = reducer_workers.get(succ_op_key) if reducer_worker and reducer_worker != worker: data_to_addresses[(chunk_key, shuffle_key)] = [reducer_worker] meta.workers += (reducer_worker,) if unused_keys: self._free_data_in_worker(unused_keys, [(worker,)] * len(unused_keys)) if data_to_addresses: try: with rewrite_worker_errors(): self._get_raw_execution_ref(address=worker).send_data_to_workers( self._session_id, data_to_addresses, _tell=True ) except WorkerDead: self._resource_ref.detach_dead_workers([worker], _tell=True) if all(k in self._finish_preds for k in self._pred_keys): self._start_successors()
def add_finished_predecessor( self, op_key, worker, output_sizes=None, output_shapes=None ): super().add_finished_predecessor( op_key, worker, output_sizes=output_sizes, output_shapes=output_shapes ) from ..chunkmeta import WorkerMeta chunk_key = next(iter(output_sizes.keys()))[0] self._mapper_op_to_chunk[op_key] = chunk_key if op_key not in self._worker_to_mappers[worker]: self._worker_to_mappers[worker].add(op_key) self.chunk_meta.add_worker(self._session_id, chunk_key, worker, _tell=True) shuffle_keys_to_op = self._shuffle_keys_to_op if not self._reducer_workers: self._reducer_workers = self._graph_refs[0].assign_operand_workers( self._succ_keys, input_chunk_metas=self._reducer_to_mapper ) reducer_workers = self._reducer_workers data_to_addresses = dict() for (chunk_key, shuffle_key), data_size in output_sizes.items() or (): succ_op_key = shuffle_keys_to_op[shuffle_key] meta = self._reducer_to_mapper[succ_op_key][op_key] = WorkerMeta( chunk_size=data_size, workers=(worker,), chunk_shape=output_shapes.get((chunk_key, shuffle_key)), ) reducer_worker = reducer_workers.get(succ_op_key) if reducer_worker and reducer_worker != worker: data_to_addresses[(chunk_key, shuffle_key)] = [reducer_worker] meta.workers += (reducer_worker,) if data_to_addresses: try: with rewrite_worker_errors(): self._get_raw_execution_ref(address=worker).send_data_to_workers( self._session_id, data_to_addresses, _tell=True ) except WorkerDead: self._resource_ref.detach_dead_workers([worker], _tell=True) if all(k in self._finish_preds for k in self._pred_keys): self._start_successors()
https://github.com/mars-project/mars/issues/1740
Error Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 377, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 646, in prepare_graph cur_chunk_graph = chunk_graph_builder.build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 347, in build chunk_graph = super().build( File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/Users/wenjun.swj/Code/mars/mars/context.py", line 72, in h return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 451, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 257, in tile return cls._tile_single_output(op) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/dmatrix.py", line 227, in _tile_single_output data_chunk = concat_chunks(chunks[0]) File "/Users/wenjun.swj/Code/mars/mars/learn/utils/core.py", line 33, in concat_chunks tileable = chunks[0].op.create_tileable_from_chunks(chunks) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 212, in create_tileable_from_chunks params = cls._calc_dataframe_params(chunk_index_to_chunk, chunk_shape) File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in _calc_dataframe_params pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() File "/Users/wenjun.swj/Code/mars/mars/dataframe/operands.py", line 228, in <listcomp> pd_indxes = [chunk_index_to_chunks[i, 0].index_value.to_pandas() KeyError: (2, 0) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun.swj/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/tests/integrated/test_distributed_xgboost.py", line 71, in testDistributedXGBClassifier classifier.fit(X, y, eval_set=[(X, y)], session=sess, run_kwargs=run_kwargs) File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/classifier.py", line 55, in fit result = train(params, dtrain, num_boost_round=self.get_num_boosting_rounds(), File "/Users/wenjun.swj/Code/mars/mars/learn/contrib/xgboost/train.py", line 200, in train ret = t.execute(session=session, **run_kwargs).fetch(session=session) File "/Users/wenjun.swj/Code/mars/mars/core.py", line 379, in execute return run() File "/Users/wenjun.swj/Code/mars/mars/core.py", line 374, in run session.run(self, **kw) File "/Users/wenjun.swj/Code/mars/mars/session.py", line 499, in run result = self._sess.run(*tileables, **kw) File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 214, in run if self._check_response_finished(graph_url, timeout_val): File "/Users/wenjun.swj/Code/mars/mars/web/session.py", line 174, in _check_response_finished raise ExecutionFailed('Graph execution failed.') from exc mars.errors.ExecutionFailed: 'Graph execution failed.'
KeyError
def all(a, axis=None, out=None, keepdims=None, combine_size=None): """ Test whether all array elements along a given axis evaluate to True. Parameters ---------- a : array_like Input tensor or object that can be converted to a tensor. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. The default (`axis` = `None`) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : Tensor, optional Alternate output tensor in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if ``dtype(out)`` is float, the result will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input tensor. If the default value is passed, then `keepdims` will not be passed through to the `all` method of sub-classes of `ndarray`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. combine_size: int, optional The number of chunks to combine. Returns ------- all : Tensor, bool A new boolean or tensor is returned unless `out` is specified, in which case a reference to `out` is returned. See Also -------- Tensor.all : equivalent method any : Test whether any element along a given axis evaluates to True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to `True` because these are not equal to zero. Examples -------- >>> import mars.tensor as mt >>> mt.all([[True,False],[True,True]]).execute() False >>> mt.all([[True,False],[True,True]], axis=0).execute() array([ True, False]) >>> mt.all([-1, 4, 5]).execute() True >>> mt.all([1.0, mt.nan]).execute() True """ a = astensor(a) if a.dtype == np.object_: dtype = a.dtype else: dtype = np.dtype(bool) op = TensorAll(axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size) return op(a, out=out)
def all(a, axis=None, out=None, keepdims=None, combine_size=None): """ Test whether all array elements along a given axis evaluate to True. Parameters ---------- a : array_like Input tensor or object that can be converted to a tensor. axis : None or int or tuple of ints, optional Axis or axes along which a logical AND reduction is performed. The default (`axis` = `None`) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : Tensor, optional Alternate output tensor in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if ``dtype(out)`` is float, the result will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section "Output arguments") for more details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input tensor. If the default value is passed, then `keepdims` will not be passed through to the `all` method of sub-classes of `ndarray`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. combine_size: int, optional The number of chunks to combine. Returns ------- all : Tensor, bool A new boolean or tensor is returned unless `out` is specified, in which case a reference to `out` is returned. See Also -------- Tensor.all : equivalent method any : Test whether any element along a given axis evaluates to True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to `True` because these are not equal to zero. Examples -------- >>> import mars.tensor as mt >>> mt.all([[True,False],[True,True]]).execute() False >>> mt.all([[True,False],[True,True]], axis=0).execute() array([ True, False]) >>> mt.all([-1, 4, 5]).execute() True >>> mt.all([1.0, mt.nan]).execute() True """ a = astensor(a) op = TensorAll( axis=axis, dtype=np.dtype(bool), keepdims=keepdims, combine_size=combine_size ) return op(a, out=out)
https://github.com/mars-project/mars/issues/1743
In [5]: a = mt.tensor(['a', 'b', 'c'], dtype=object) In [6]: a.max().execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-d9ebfaf2dc7b> in <module> ----> 1 a.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/tensor/reduction/core.py in execute(cls, ctx, op) 288 return cls.execute_agg(ctx, op) 289 else: --> 290 return cls.execute_one_chunk(ctx, op) 291 292 ~/Workspace/mars/mars/tensor/reduction/core.py in execute_one_chunk(cls, ctx, op) 277 @classmethod 278 def execute_one_chunk(cls, ctx, op): --> 279 cls.execute_agg(ctx, op) 280 281 @classmethod ~/Workspace/mars/mars/tensor/reduction/core.py in execute_agg(cls, ctx, op) 273 keepdims=bool(op.keepdims)) 274 --> 275 ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False) 276 277 @classmethod AttributeError: 'str' object has no attribute 'astype'
AttributeError
def any(a, axis=None, out=None, keepdims=None, combine_size=None): """ Test whether any tensor element along a given axis evaluates to True. Returns single boolean unless `axis` is not ``None`` Parameters ---------- a : array_like Input tensor or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical OR reduction is performed. The default (`axis` = `None`) is to perform a logical OR over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : Tensor, optional Alternate output tensor in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if it is of type float, then it will remain so, returning 1.0 for True and 0.0 for False, regardless of the type of `a`). See `doc.ufuncs` (Section "Output arguments") for details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input tensor. If the default value is passed, then `keepdims` will not be passed through to the `any` method of sub-classes of `Tensor`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. combine_size: int, optional The number of chunks to combine. Returns ------- any : bool or Tensor A new boolean or `Tensor` is returned unless `out` is specified, in which case a reference to `out` is returned. See Also -------- Tensor.any : equivalent method all : Test whether all elements along a given axis evaluate to True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to `True` because these are not equal to zero. Examples -------- >>> import mars.tensor as mt >>> mt.any([[True, False], [True, True]]).execute() True >>> mt.any([[True, False], [False, False]], axis=0).execute() array([ True, False]) >>> mt.any([-1, 0, 5]).execute() True >>> mt.any(mt.nan).execute() True """ a = astensor(a) if a.dtype == np.object_: dtype = a.dtype else: dtype = np.dtype(bool) op = TensorAny(axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size) return op(a, out=out)
def any(a, axis=None, out=None, keepdims=None, combine_size=None): """ Test whether any tensor element along a given axis evaluates to True. Returns single boolean unless `axis` is not ``None`` Parameters ---------- a : array_like Input tensor or object that can be converted to an array. axis : None or int or tuple of ints, optional Axis or axes along which a logical OR reduction is performed. The default (`axis` = `None`) is to perform a logical OR over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : Tensor, optional Alternate output tensor in which to place the result. It must have the same shape as the expected output and its type is preserved (e.g., if it is of type float, then it will remain so, returning 1.0 for True and 0.0 for False, regardless of the type of `a`). See `doc.ufuncs` (Section "Output arguments") for details. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input tensor. If the default value is passed, then `keepdims` will not be passed through to the `any` method of sub-classes of `Tensor`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. combine_size: int, optional The number of chunks to combine. Returns ------- any : bool or Tensor A new boolean or `Tensor` is returned unless `out` is specified, in which case a reference to `out` is returned. See Also -------- Tensor.any : equivalent method all : Test whether all elements along a given axis evaluate to True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to `True` because these are not equal to zero. Examples -------- >>> import mars.tensor as mt >>> mt.any([[True, False], [True, True]]).execute() True >>> mt.any([[True, False], [False, False]], axis=0).execute() array([ True, False]) >>> mt.any([-1, 0, 5]).execute() True >>> mt.any(mt.nan).execute() True """ a = astensor(a) op = TensorAny( axis=axis, dtype=np.dtype(bool), keepdims=keepdims, combine_size=combine_size ) return op(a, out=out)
https://github.com/mars-project/mars/issues/1743
In [5]: a = mt.tensor(['a', 'b', 'c'], dtype=object) In [6]: a.max().execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-d9ebfaf2dc7b> in <module> ----> 1 a.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/tensor/reduction/core.py in execute(cls, ctx, op) 288 return cls.execute_agg(ctx, op) 289 else: --> 290 return cls.execute_one_chunk(ctx, op) 291 292 ~/Workspace/mars/mars/tensor/reduction/core.py in execute_one_chunk(cls, ctx, op) 277 @classmethod 278 def execute_one_chunk(cls, ctx, op): --> 279 cls.execute_agg(ctx, op) 280 281 @classmethod ~/Workspace/mars/mars/tensor/reduction/core.py in execute_agg(cls, ctx, op) 273 keepdims=bool(op.keepdims)) 274 --> 275 ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False) 276 277 @classmethod AttributeError: 'str' object has no attribute 'astype'
AttributeError
def execute_agg(cls, ctx, op): (input_chunk,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True ) axis = cls.get_axis(op.axis) func_name = getattr(cls, "_func_name", None) reduce_func = getattr(xp, func_name) out = op.outputs[0] with device(device_id): if "dtype" in inspect.getfullargspec(reduce_func).args: ret = reduce_func( input_chunk, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims) ) else: ret = reduce_func(input_chunk, axis=axis, keepdims=bool(op.keepdims)) if hasattr(ret, "astype"): # for non-object dtype ret = ret.astype(op.dtype, order=out.order.value, copy=False) ctx[out.key] = ret
def execute_agg(cls, ctx, op): (input_chunk,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True ) axis = cls.get_axis(op.axis) func_name = getattr(cls, "_func_name", None) reduce_func = getattr(xp, func_name) out = op.outputs[0] with device(device_id): if "dtype" in inspect.getfullargspec(reduce_func).args: ret = reduce_func( input_chunk, axis=axis, dtype=op.dtype, keepdims=bool(op.keepdims) ) else: ret = reduce_func(input_chunk, axis=axis, keepdims=bool(op.keepdims)) ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False)
https://github.com/mars-project/mars/issues/1743
In [5]: a = mt.tensor(['a', 'b', 'c'], dtype=object) In [6]: a.max().execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-d9ebfaf2dc7b> in <module> ----> 1 a.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/tensor/reduction/core.py in execute(cls, ctx, op) 288 return cls.execute_agg(ctx, op) 289 else: --> 290 return cls.execute_one_chunk(ctx, op) 291 292 ~/Workspace/mars/mars/tensor/reduction/core.py in execute_one_chunk(cls, ctx, op) 277 @classmethod 278 def execute_one_chunk(cls, ctx, op): --> 279 cls.execute_agg(ctx, op) 280 281 @classmethod ~/Workspace/mars/mars/tensor/reduction/core.py in execute_agg(cls, ctx, op) 273 keepdims=bool(op.keepdims)) 274 --> 275 ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False) 276 277 @classmethod AttributeError: 'str' object has no attribute 'astype'
AttributeError
def execute_map(cls, ctx, op): arg_axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim) (in_chunk,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True ) func_name = getattr(cls, "_func_name") agg_func_name = getattr(cls, "_agg_func_name") arg_func = getattr(xp, func_name) agg_func_name = getattr(xp, agg_func_name) offset = op.offset chunk = op.outputs[0] with device(device_id): vals = agg_func_name(in_chunk, axis=arg_axis) if hasattr(vals, "reshape"): vals = vals.reshape(chunk.shape) try: arg = arg_func(in_chunk, axis=arg_axis) if hasattr(arg, "reshape"): arg = arg.reshape(chunk.shape) except ValueError: # handle all NaN arg = arg_func( xp.where(xp.isnan(in_chunk), np.inf, in_chunk), axis=arg_axis ).reshape(chunk.shape) if arg_axis is None: if xp == cp: # we need to copy to do cpu computation, then copy back to gpu # cuz unravel_index and ravel_multi_index are not implemented in cupy in_chunk = in_chunk.get() total_shape = op.total_shape ind = np.unravel_index(arg.ravel()[0], in_chunk.shape) total_ind = tuple(o + i for (o, i) in zip(offset, ind)) res = np.ravel_multi_index(total_ind, total_shape) if xp == cp: # copy back with xp.cuda.Device(in_chunk.device.id): arg[:] = xp.asarray(res) else: arg[:] = res else: arg += offset ctx[op.outputs[0].key] = (vals, arg)
def execute_map(cls, ctx, op): arg_axis = cls.get_arg_axis(op.axis, op.inputs[0].ndim) (in_chunk,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True ) func_name = getattr(cls, "_func_name") agg_func_name = getattr(cls, "_agg_func_name") arg_func = getattr(xp, func_name) agg_func_name = getattr(xp, agg_func_name) offset = op.offset chunk = op.outputs[0] with device(device_id): vals = agg_func_name(in_chunk, axis=arg_axis).reshape(chunk.shape) try: arg = arg_func(in_chunk, axis=arg_axis).reshape(chunk.shape) except ValueError: # handle all NaN arg = arg_func( xp.where(xp.isnan(in_chunk), np.inf, in_chunk), axis=arg_axis ).reshape(chunk.shape) if arg_axis is None: if xp == cp: # we need to copy to do cpu computation, then copy back to gpu # cuz unravel_index and ravel_multi_index are not implemented in cupy in_chunk = in_chunk.get() total_shape = op.total_shape ind = np.unravel_index(arg.ravel()[0], in_chunk.shape) total_ind = tuple(o + i for (o, i) in zip(offset, ind)) res = np.ravel_multi_index(total_ind, total_shape) if xp == cp: # copy back with xp.cuda.Device(in_chunk.device.id): arg[:] = xp.asarray(res) else: arg[:] = res else: arg += offset ctx[op.outputs[0].key] = (vals, arg)
https://github.com/mars-project/mars/issues/1743
In [5]: a = mt.tensor(['a', 'b', 'c'], dtype=object) In [6]: a.max().execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-d9ebfaf2dc7b> in <module> ----> 1 a.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/tensor/reduction/core.py in execute(cls, ctx, op) 288 return cls.execute_agg(ctx, op) 289 else: --> 290 return cls.execute_one_chunk(ctx, op) 291 292 ~/Workspace/mars/mars/tensor/reduction/core.py in execute_one_chunk(cls, ctx, op) 277 @classmethod 278 def execute_one_chunk(cls, ctx, op): --> 279 cls.execute_agg(ctx, op) 280 281 @classmethod ~/Workspace/mars/mars/tensor/reduction/core.py in execute_agg(cls, ctx, op) 273 keepdims=bool(op.keepdims)) 274 --> 275 ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False) 276 277 @classmethod AttributeError: 'str' object has no attribute 'astype'
AttributeError
def tile(cls, op): from ..indexing.slice import TensorSlice in_tensor = op.inputs[0] out_tensor = op.outputs[0] axis = op.axis if not isinstance(axis, int): raise ValueError("axis must be a integer") axis = validate_axis(in_tensor.ndim, axis) if axis is None: raise NotImplementedError op_type, bin_op_type = getattr(op, "_get_op_types")() chunks = [] for c in in_tensor.chunks: chunk_op = op_type(axis=op.axis, dtype=op.dtype) chunks.append( chunk_op.new_chunk( [c], shape=c.shape, index=c.index, order=out_tensor.order ) ) inter_tensor = copy.copy(in_tensor) inter_tensor._chunks = chunks slc = tuple( slice(None) if i != axis else slice(-1, None) for i in range(in_tensor.ndim) ) output_chunks = [] for chunk in chunks: if chunk.index[axis] == 0: output_chunks.append(chunk) continue to_cum_chunks = [] for i in range(chunk.index[axis]): to_cum_index = chunk.index[:axis] + (i,) + chunk.index[axis + 1 :] shape = chunk.shape[:axis] + (1,) + chunk.shape[axis + 1 :] to_cum_chunk = inter_tensor.cix[to_cum_index] slice_op = TensorSlice(slices=slc, dtype=chunk.dtype) sliced_chunk = slice_op.new_chunk( [to_cum_chunk], shape=shape, index=to_cum_index, order=out_tensor.order ) to_cum_chunks.append(sliced_chunk) to_cum_chunks.append(chunk) bin_op = bin_op_type(dtype=chunk.dtype) output_chunk = bin_op.new_chunk( to_cum_chunks, shape=chunk.shape, index=chunk.index, order=out_tensor.order ) output_chunks.append(output_chunk) new_op = op.copy() return new_op.new_tensors( op.inputs, in_tensor.shape, order=out_tensor.order, chunks=output_chunks, nsplits=in_tensor.nsplits, )
def tile(cls, op): from ..indexing.slice import TensorSlice in_tensor = op.inputs[0] out_tensor = op.outputs[0] axis = op.axis if not isinstance(axis, int): raise ValueError("axis must be a integer") axis = validate_axis(in_tensor.ndim, axis) if axis is None: raise NotImplementedError op_type, bin_op_type = getattr(op, "_get_op_types")() chunks = [] for c in in_tensor.chunks: chunk_op = op_type(axis=op.axis, dtype=op.dtype) chunks.append( chunk_op.new_chunk( [c], shape=c.shape, index=c.index, order=out_tensor.order ) ) inter_tensor = copy.copy(in_tensor) inter_tensor._chunks = chunks slc = tuple( slice(None) if i != axis else slice(-1, None) for i in range(in_tensor.ndim) ) output_chunks = [] for chunk in chunks: if chunk.index[axis] == 0: output_chunks.append(chunk) continue to_cum_chunks = [chunk] for i in range(chunk.index[axis]): to_cum_index = chunk.index[:axis] + (i,) + chunk.index[axis + 1 :] shape = chunk.shape[:axis] + (1,) + chunk.shape[axis + 1 :] to_cum_chunk = inter_tensor.cix[to_cum_index] slice_op = TensorSlice(slices=slc, dtype=chunk.dtype) sliced_chunk = slice_op.new_chunk( [to_cum_chunk], shape=shape, index=to_cum_index, order=out_tensor.order ) to_cum_chunks.append(sliced_chunk) bin_op = bin_op_type(dtype=chunk.dtype) output_chunk = bin_op.new_chunk( to_cum_chunks, shape=chunk.shape, index=chunk.index, order=out_tensor.order ) output_chunks.append(output_chunk) new_op = op.copy() return new_op.new_tensors( op.inputs, in_tensor.shape, order=out_tensor.order, chunks=output_chunks, nsplits=in_tensor.nsplits, )
https://github.com/mars-project/mars/issues/1743
In [5]: a = mt.tensor(['a', 'b', 'c'], dtype=object) In [6]: a.max().execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-d9ebfaf2dc7b> in <module> ----> 1 a.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/tensor/reduction/core.py in execute(cls, ctx, op) 288 return cls.execute_agg(ctx, op) 289 else: --> 290 return cls.execute_one_chunk(ctx, op) 291 292 ~/Workspace/mars/mars/tensor/reduction/core.py in execute_one_chunk(cls, ctx, op) 277 @classmethod 278 def execute_one_chunk(cls, ctx, op): --> 279 cls.execute_agg(ctx, op) 280 281 @classmethod ~/Workspace/mars/mars/tensor/reduction/core.py in execute_agg(cls, ctx, op) 273 keepdims=bool(op.keepdims)) 274 --> 275 ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False) 276 277 @classmethod AttributeError: 'str' object has no attribute 'astype'
AttributeError
def sum(a, axis=None, dtype=None, out=None, keepdims=None, combine_size=None): """ Sum of tensor elements over a given axis. Parameters ---------- a : array_like Elements to sum. axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements of the input tensor. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. dtype : dtype, optional The type of the returned tensor and of the accumulator in which the elements are summed. The dtype of `a` is used by default unless `a` has an integer dtype of less precision than the default platform integer. In that case, if `a` is signed then the platform integer is used while if `a` is unsigned then an unsigned integer of the same precision as the platform integer is used. out : Tensor, optional Alternative output tensor in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input tensor. If the default value is passed, then `keepdims` will not be passed through to the `sum` method of sub-classes of `Tensor`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. combine_size: int, optional The number of chunks to combine. Returns ------- sum_along_axis : Tensor An array with the same shape as `a`, with the specified axis removed. If `a` is a 0-d tensor, or if `axis` is None, a scalar is returned. If an output array is specified, a reference to `out` is returned. See Also -------- Tensor.sum : Equivalent method. cumsum : Cumulative sum of tensor elements. trapz : Integration of tensor values using the composite trapezoidal rule. mean, average Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. The sum of an empty array is the neutral element 0: >>> import mars.tensor as mt >>> mt.sum([]).execute() 0.0 Examples -------- >>> mt.sum([0.5, 1.5]).execute() 2.0 >>> mt.sum([0.5, 0.7, 0.2, 1.5], dtype=mt.int32).execute() 1 >>> mt.sum([[0, 1], [0, 5]]).execute() 6 >>> mt.sum([[0, 1], [0, 5]], axis=0).execute() array([0, 6]) >>> mt.sum([[0, 1], [0, 5]], axis=1).execute() array([1, 5]) If the accumulator is too small, overflow occurs: >>> mt.ones(128, dtype=mt.int8).sum(dtype=mt.int8).execute() -128 """ a = astensor(a) if dtype is None: if a.dtype == np.object_: dtype = a.dtype else: dtype = np.empty((1,), dtype=a.dtype).sum().dtype else: dtype = np.dtype(dtype) op = TensorSum(axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size) return op(a, out=out)
def sum(a, axis=None, dtype=None, out=None, keepdims=None, combine_size=None): """ Sum of tensor elements over a given axis. Parameters ---------- a : array_like Elements to sum. axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements of the input tensor. If axis is negative it counts from the last to the first axis. If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. dtype : dtype, optional The type of the returned tensor and of the accumulator in which the elements are summed. The dtype of `a` is used by default unless `a` has an integer dtype of less precision than the default platform integer. In that case, if `a` is signed then the platform integer is used while if `a` is unsigned then an unsigned integer of the same precision as the platform integer is used. out : Tensor, optional Alternative output tensor in which to place the result. It must have the same shape as the expected output, but the type of the output values will be cast if necessary. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input tensor. If the default value is passed, then `keepdims` will not be passed through to the `sum` method of sub-classes of `Tensor`, however any non-default value will be. If the sub-classes `sum` method does not implement `keepdims` any exceptions will be raised. combine_size: int, optional The number of chunks to combine. Returns ------- sum_along_axis : Tensor An array with the same shape as `a`, with the specified axis removed. If `a` is a 0-d tensor, or if `axis` is None, a scalar is returned. If an output array is specified, a reference to `out` is returned. See Also -------- Tensor.sum : Equivalent method. cumsum : Cumulative sum of tensor elements. trapz : Integration of tensor values using the composite trapezoidal rule. mean, average Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. The sum of an empty array is the neutral element 0: >>> import mars.tensor as mt >>> mt.sum([]).execute() 0.0 Examples -------- >>> mt.sum([0.5, 1.5]).execute() 2.0 >>> mt.sum([0.5, 0.7, 0.2, 1.5], dtype=mt.int32).execute() 1 >>> mt.sum([[0, 1], [0, 5]]).execute() 6 >>> mt.sum([[0, 1], [0, 5]], axis=0).execute() array([0, 6]) >>> mt.sum([[0, 1], [0, 5]], axis=1).execute() array([1, 5]) If the accumulator is too small, overflow occurs: >>> mt.ones(128, dtype=mt.int8).sum(dtype=mt.int8).execute() -128 """ a = astensor(a) if dtype is None: dtype = np.empty((1,), dtype=a.dtype).sum().dtype else: dtype = np.dtype(dtype) op = TensorSum(axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size) return op(a, out=out)
https://github.com/mars-project/mars/issues/1743
In [5]: a = mt.tensor(['a', 'b', 'c'], dtype=object) In [6]: a.max().execute() --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-6-d9ebfaf2dc7b> in <module> ----> 1 a.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/tensor/reduction/core.py in execute(cls, ctx, op) 288 return cls.execute_agg(ctx, op) 289 else: --> 290 return cls.execute_one_chunk(ctx, op) 291 292 ~/Workspace/mars/mars/tensor/reduction/core.py in execute_one_chunk(cls, ctx, op) 277 @classmethod 278 def execute_one_chunk(cls, ctx, op): --> 279 cls.execute_agg(ctx, op) 280 281 @classmethod ~/Workspace/mars/mars/tensor/reduction/core.py in execute_agg(cls, ctx, op) 273 keepdims=bool(op.keepdims)) 274 --> 275 ctx[out.key] = ret.astype(op.dtype, order=out.order.value, copy=False) 276 277 @classmethod AttributeError: 'str' object has no attribute 'astype'
AttributeError
def read_csv( path, names=None, sep=",", index_col=None, compression=None, header="infer", dtype=None, usecols=None, nrows=None, chunk_bytes="64M", gpu=None, head_bytes="100k", head_lines=None, incremental_index=False, use_arrow_dtype=None, storage_options=None, **kwargs, ): r""" Read a comma-separated values (csv) file into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Parameters ---------- path : str Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: file://localhost/path/to/table.csv, you can alos read from external resources using a URL like: hdfs://localhost:8020/test.csv. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. sep : str, default ',' Delimiter to use. If sep is None, the C engine cannot automatically detect the separator, but the Python parsing engine can, meaning the latter will be used and automatically detect the separator by Python's builtin sniffer tool, ``csv.Sniffer``. In addition, separators longer than 1 character and different from ``'\s+'`` will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``. delimiter : str, default ``None`` Alias for sep. header : int, list of int, default 'infer' Row number(s) to use as the column names, and the start of the data. Default behavior is to infer the column names: if no names are passed the behavior is identical to ``header=0`` and column names are inferred from the first line of the file, if column names are passed explicitly then the behavior is identical to ``header=None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not specified will be skipped (e.g. 2 in this example is skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so ``header=0`` denotes the first line of data rather than the first line of the file. names : array-like, optional List of column names to use. If the file contains a header row, then you should explicitly pass ``header=0`` to override the column names. Duplicates in this list are not allowed. index_col : int, str, sequence of int / str, or False, default ``None`` Column(s) to use as the row labels of the ``DataFrame``, either given as string name or column index. If a sequence of int / str is given, a MultiIndex is used. Note: ``index_col=False`` can be used to force pandas to *not* use the first column as the index, e.g. when you have a malformed file with delimiters at the end of each line. usecols : list-like or callable, optional Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid list-like `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To instantiate a DataFrame from ``data`` with element order preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns in ``['foo', 'bar']`` order or ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` for ``['bar', 'foo']`` order. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True. An example of a valid callable argument would be ``lambda x: x.upper() in ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster parsing time and lower memory usage. squeeze : bool, default False If the parsed data only contains one column then return a Series. prefix : str, optional Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... mangle_dupe_cols : bool, default True Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. dtype : Type name or dict of column -> type, optional Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32, 'c': 'Int64'} Use `str` or `object` together with suitable `na_values` settings to preserve and not interpret dtype. If converters are specified, they will be applied INSTEAD of dtype conversion. engine : {'c', 'python'}, optional Parser engine to use. The C engine is faster while the python engine is currently more feature-complete. converters : dict, optional Dict of functions for converting values in certain columns. Keys can either be integers or column labels. true_values : list, optional Values to consider as True. false_values : list, optional Values to consider as False. skipinitialspace : bool, default False Skip spaces after delimiter. skiprows : list-like, int or callable, optional Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file. If callable, the callable function will be evaluated against the row indices, returning True if the row should be skipped and False otherwise. An example of a valid callable argument would be ``lambda x: x in [0, 2]``. skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with engine='c'). nrows : int, optional Number of rows of file to read. Useful for reading pieces of large files. na_values : scalar, str, list-like, or dict, optional Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null'. keep_default_na : bool, default True Whether or not to include the default NaN values when parsing the data. Depending on whether `na_values` is passed in, the behavior is as follows: * If `keep_default_na` is True, and `na_values` are specified, `na_values` is appended to the default NaN values used for parsing. * If `keep_default_na` is True, and `na_values` are not specified, only the default NaN values are used for parsing. * If `keep_default_na` is False, and `na_values` are specified, only the NaN values specified `na_values` are used for parsing. * If `keep_default_na` is False, and `na_values` are not specified, no strings will be parsed as NaN. Note that if `na_filter` is passed in as False, the `keep_default_na` and `na_values` parameters will be ignored. na_filter : bool, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file. verbose : bool, default False Indicate number of NA values placed in non-numeric columns. skip_blank_lines : bool, default True If True, skip over blank lines rather than interpreting as NaN values. parse_dates : bool or list of int or names or list of lists or dict, default False The behavior is as follows: * boolean. If True -> try parsing the index. * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result 'foo' If a column or index cannot be represented as an array of datetimes, say because of an unparseable value or a mixture of timezones, the column or index will be returned unaltered as an object data type. For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``. To parse an index or column with a mixture of timezones, specify ``date_parser`` to be a partially-applied :func:`pandas.to_datetime` with ``utc=True``. See :ref:`io.csv.mixed_timezones` for more. Note: A fast-path exists for iso8601-formatted dates. infer_datetime_format : bool, default False If True and `parse_dates` is enabled, pandas will attempt to infer the format of the datetime strings in the columns, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by 5-10x. keep_date_col : bool, default False If True and `parse_dates` specifies combining multiple columns then keep the original columns. date_parser : function, optional Function to use for converting a sequence of string columns to an array of datetime instances. The default uses ``dateutil.parser.parser`` to do the conversion. Pandas will try to call `date_parser` in three different ways, advancing to the next if an exception occurs: 1) Pass one or more arrays (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the string values from the columns defined by `parse_dates` into a single array and pass that; and 3) call `date_parser` once for each row using one or more strings (corresponding to the columns defined by `parse_dates`) as arguments. dayfirst : bool, default False DD/MM format dates, international and European format. cache_dates : bool, default True If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. .. versionadded:: 0.25.0 iterator : bool, default False Return TextFileReader object for iteration or getting chunks with ``get_chunk()``. chunksize : int, optional Return TextFileReader object for iteration. See the `IO Tools docs <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_ for more information on ``iterator`` and ``chunksize``. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no decompression). If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. thousands : str, optional Thousands separator. decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). lineterminator : str (length 1), optional Character to break file into lines. Only valid with C parser. quotechar : str (length 1), optional The character used to denote the start and end of a quoted item. Quoted items can include the delimiter and it will be ignored. quoting : int or csv.QUOTE_* instance, default 0 Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3). doublequote : bool, default ``True`` When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate whether or not to interpret two consecutive quotechar elements INSIDE a field as a single ``quotechar`` element. escapechar : str (length 1), optional One-character string used to escape other characters. comment : str, optional Indicates remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. Like empty lines (as long as ``skip_blank_lines=True``), fully commented lines are ignored by the parameter `header` but not by `skiprows`. For example, if ``comment='#'``, parsing ``#empty\na,b,c\n1,2,3`` with ``header=0`` will result in 'a,b,c' being treated as the header. encoding : str, optional Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ . dialect : str or csv.Dialect, optional If provided, this parameter will override values (default or not) for the following parameters: `delimiter`, `doublequote`, `escapechar`, `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to override values, a ParserWarning will be issued. See csv.Dialect documentation for more details. error_bad_lines : bool, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. If False, then these "bad lines" will dropped from the DataFrame that is returned. warn_bad_lines : bool, default True If error_bad_lines is False, and warn_bad_lines is True, a warning for each "bad line" will be output. delim_whitespace : bool, default False Specifies whether or not whitespace (e.g. ``' '`` or ``' '``) will be used as the sep. Equivalent to setting ``sep='\s+'``. If this option is set to True, nothing should be passed in for the ``delimiter`` parameter. low_memory : bool, default True Internally process the file in chunks, resulting in lower memory use while parsing, but possibly mixed type inference. To ensure no mixed types either set False, or specify the type with the `dtype` parameter. Note that the entire file is read into a single DataFrame regardless, use the `chunksize` or `iterator` parameter to return the data in chunks. (Only valid with C parser). float_precision : str, optional Specifies which converter the C engine should use for floating-point values. The options are `None` for the ordinary converter, `high` for the high-precision converter, and `round_trip` for the round-trip converter. chunk_bytes: int, float or str, optional Number of chunk bytes. gpu: bool, default False If read into cudf DataFrame. head_bytes: int, float or str, optional Number of bytes to use in the head of file, mainly for data inference. head_lines: int, optional Number of lines to use in the head of file, mainly for data inference. incremental_index: bool, default False Create a new RangeIndex if csv doesn't contain index columns. use_arrow_dtype: bool, default None If True, use arrow dtype to store columns. storage_options: dict, optional Options for storage connection. Returns ------- DataFrame A comma-separated values (csv) file is returned as two-dimensional data structure with labeled axes. See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. Examples -------- >>> import mars.dataframe as md >>> md.read_csv('data.csv') # doctest: +SKIP >>> # read from HDFS >>> md.read_csv('hdfs://localhost:8020/test.csv') # doctest: +SKIP """ # infer dtypes and columns if isinstance(path, (list, tuple)): file_path = path[0] else: file_path = glob(path)[0] with open_file( file_path, compression=compression, storage_options=storage_options ) as f: if head_lines is not None: b = b"".join([f.readline() for _ in range(head_lines)]) else: head_bytes = int(parse_readable_size(head_bytes)[0]) head_start, head_end = _find_chunk_start_end(f, 0, head_bytes) f.seek(head_start) b = f.read(head_end - head_start) mini_df = pd.read_csv( BytesIO(b), sep=sep, index_col=index_col, dtype=dtype, names=names, header=header, ) if names is None: names = list(mini_df.columns) else: # if names specified, header should be None header = None if usecols: usecols = usecols if isinstance(usecols, list) else [usecols] col_index = sorted(mini_df.columns.get_indexer(usecols)) mini_df = mini_df.iloc[:, col_index] if isinstance(mini_df.index, pd.RangeIndex): index_value = parse_index(pd.RangeIndex(-1)) else: index_value = parse_index(mini_df.index) columns_value = parse_index(mini_df.columns, store_data=True) if index_col and not isinstance(index_col, int): index_col = list(mini_df.columns).index(index_col) op = DataFrameReadCSV( path=path, names=names, sep=sep, header=header, index_col=index_col, usecols=usecols, compression=compression, gpu=gpu, incremental_index=incremental_index, use_arrow_dtype=use_arrow_dtype, storage_options=storage_options, **kwargs, ) chunk_bytes = chunk_bytes or options.chunk_store_limit dtypes = mini_df.dtypes if use_arrow_dtype is None: use_arrow_dtype = options.dataframe.use_arrow_dtype if not gpu and use_arrow_dtype: dtypes = to_arrow_dtypes(dtypes, test_df=mini_df) ret = op( index_value=index_value, columns_value=columns_value, dtypes=dtypes, chunk_bytes=chunk_bytes, ) if nrows is not None: return ret.head(nrows) return ret
def read_csv( path, names=None, sep=",", index_col=None, compression=None, header="infer", dtype=None, usecols=None, nrows=None, chunk_bytes="64M", gpu=None, head_bytes="100k", head_lines=None, incremental_index=False, use_arrow_dtype=None, storage_options=None, **kwargs, ): r""" Read a comma-separated values (csv) file into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Parameters ---------- path : str Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: file://localhost/path/to/table.csv, you can alos read from external resources using a URL like: hdfs://localhost:8020/test.csv. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. sep : str, default ',' Delimiter to use. If sep is None, the C engine cannot automatically detect the separator, but the Python parsing engine can, meaning the latter will be used and automatically detect the separator by Python's builtin sniffer tool, ``csv.Sniffer``. In addition, separators longer than 1 character and different from ``'\s+'`` will be interpreted as regular expressions and will also force the use of the Python parsing engine. Note that regex delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``. delimiter : str, default ``None`` Alias for sep. header : int, list of int, default 'infer' Row number(s) to use as the column names, and the start of the data. Default behavior is to infer the column names: if no names are passed the behavior is identical to ``header=0`` and column names are inferred from the first line of the file, if column names are passed explicitly then the behavior is identical to ``header=None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not specified will be skipped (e.g. 2 in this example is skipped). Note that this parameter ignores commented lines and empty lines if ``skip_blank_lines=True``, so ``header=0`` denotes the first line of data rather than the first line of the file. names : array-like, optional List of column names to use. If the file contains a header row, then you should explicitly pass ``header=0`` to override the column names. Duplicates in this list are not allowed. index_col : int, str, sequence of int / str, or False, default ``None`` Column(s) to use as the row labels of the ``DataFrame``, either given as string name or column index. If a sequence of int / str is given, a MultiIndex is used. Note: ``index_col=False`` can be used to force pandas to *not* use the first column as the index, e.g. when you have a malformed file with delimiters at the end of each line. usecols : list-like or callable, optional Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid list-like `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To instantiate a DataFrame from ``data`` with element order preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns in ``['foo', 'bar']`` order or ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` for ``['bar', 'foo']`` order. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True. An example of a valid callable argument would be ``lambda x: x.upper() in ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster parsing time and lower memory usage. squeeze : bool, default False If the parsed data only contains one column then return a Series. prefix : str, optional Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... mangle_dupe_cols : bool, default True Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. dtype : Type name or dict of column -> type, optional Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32, 'c': 'Int64'} Use `str` or `object` together with suitable `na_values` settings to preserve and not interpret dtype. If converters are specified, they will be applied INSTEAD of dtype conversion. engine : {'c', 'python'}, optional Parser engine to use. The C engine is faster while the python engine is currently more feature-complete. converters : dict, optional Dict of functions for converting values in certain columns. Keys can either be integers or column labels. true_values : list, optional Values to consider as True. false_values : list, optional Values to consider as False. skipinitialspace : bool, default False Skip spaces after delimiter. skiprows : list-like, int or callable, optional Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file. If callable, the callable function will be evaluated against the row indices, returning True if the row should be skipped and False otherwise. An example of a valid callable argument would be ``lambda x: x in [0, 2]``. skipfooter : int, default 0 Number of lines at bottom of file to skip (Unsupported with engine='c'). nrows : int, optional Number of rows of file to read. Useful for reading pieces of large files. na_values : scalar, str, list-like, or dict, optional Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: '', '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN', '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A', 'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null'. keep_default_na : bool, default True Whether or not to include the default NaN values when parsing the data. Depending on whether `na_values` is passed in, the behavior is as follows: * If `keep_default_na` is True, and `na_values` are specified, `na_values` is appended to the default NaN values used for parsing. * If `keep_default_na` is True, and `na_values` are not specified, only the default NaN values are used for parsing. * If `keep_default_na` is False, and `na_values` are specified, only the NaN values specified `na_values` are used for parsing. * If `keep_default_na` is False, and `na_values` are not specified, no strings will be parsed as NaN. Note that if `na_filter` is passed in as False, the `keep_default_na` and `na_values` parameters will be ignored. na_filter : bool, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file. verbose : bool, default False Indicate number of NA values placed in non-numeric columns. skip_blank_lines : bool, default True If True, skip over blank lines rather than interpreting as NaN values. parse_dates : bool or list of int or names or list of lists or dict, default False The behavior is as follows: * boolean. If True -> try parsing the index. * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result 'foo' If a column or index cannot be represented as an array of datetimes, say because of an unparseable value or a mixture of timezones, the column or index will be returned unaltered as an object data type. For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``. To parse an index or column with a mixture of timezones, specify ``date_parser`` to be a partially-applied :func:`pandas.to_datetime` with ``utc=True``. See :ref:`io.csv.mixed_timezones` for more. Note: A fast-path exists for iso8601-formatted dates. infer_datetime_format : bool, default False If True and `parse_dates` is enabled, pandas will attempt to infer the format of the datetime strings in the columns, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by 5-10x. keep_date_col : bool, default False If True and `parse_dates` specifies combining multiple columns then keep the original columns. date_parser : function, optional Function to use for converting a sequence of string columns to an array of datetime instances. The default uses ``dateutil.parser.parser`` to do the conversion. Pandas will try to call `date_parser` in three different ways, advancing to the next if an exception occurs: 1) Pass one or more arrays (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the string values from the columns defined by `parse_dates` into a single array and pass that; and 3) call `date_parser` once for each row using one or more strings (corresponding to the columns defined by `parse_dates`) as arguments. dayfirst : bool, default False DD/MM format dates, international and European format. cache_dates : bool, default True If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. .. versionadded:: 0.25.0 iterator : bool, default False Return TextFileReader object for iteration or getting chunks with ``get_chunk()``. chunksize : int, optional Return TextFileReader object for iteration. See the `IO Tools docs <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_ for more information on ``iterator`` and ``chunksize``. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no decompression). If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. thousands : str, optional Thousands separator. decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). lineterminator : str (length 1), optional Character to break file into lines. Only valid with C parser. quotechar : str (length 1), optional The character used to denote the start and end of a quoted item. Quoted items can include the delimiter and it will be ignored. quoting : int or csv.QUOTE_* instance, default 0 Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3). doublequote : bool, default ``True`` When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate whether or not to interpret two consecutive quotechar elements INSIDE a field as a single ``quotechar`` element. escapechar : str (length 1), optional One-character string used to escape other characters. comment : str, optional Indicates remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. Like empty lines (as long as ``skip_blank_lines=True``), fully commented lines are ignored by the parameter `header` but not by `skiprows`. For example, if ``comment='#'``, parsing ``#empty\na,b,c\n1,2,3`` with ``header=0`` will result in 'a,b,c' being treated as the header. encoding : str, optional Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python standard encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ . dialect : str or csv.Dialect, optional If provided, this parameter will override values (default or not) for the following parameters: `delimiter`, `doublequote`, `escapechar`, `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to override values, a ParserWarning will be issued. See csv.Dialect documentation for more details. error_bad_lines : bool, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. If False, then these "bad lines" will dropped from the DataFrame that is returned. warn_bad_lines : bool, default True If error_bad_lines is False, and warn_bad_lines is True, a warning for each "bad line" will be output. delim_whitespace : bool, default False Specifies whether or not whitespace (e.g. ``' '`` or ``' '``) will be used as the sep. Equivalent to setting ``sep='\s+'``. If this option is set to True, nothing should be passed in for the ``delimiter`` parameter. low_memory : bool, default True Internally process the file in chunks, resulting in lower memory use while parsing, but possibly mixed type inference. To ensure no mixed types either set False, or specify the type with the `dtype` parameter. Note that the entire file is read into a single DataFrame regardless, use the `chunksize` or `iterator` parameter to return the data in chunks. (Only valid with C parser). float_precision : str, optional Specifies which converter the C engine should use for floating-point values. The options are `None` for the ordinary converter, `high` for the high-precision converter, and `round_trip` for the round-trip converter. chunk_bytes: int, float or str, optional Number of chunk bytes. gpu: bool, default False If read into cudf DataFrame. head_bytes: int, float or str, optional Number of bytes to use in the head of file, mainly for data inference. head_lines: int, optional Number of lines to use in the head of file, mainly for data inference. incremental_index: bool, default False Create a new RangeIndex if csv doesn't contain index columns. use_arrow_dtype: bool, default None If True, use arrow dtype to store columns. storage_options: dict, optional Options for storage connection. Returns ------- DataFrame A comma-separated values (csv) file is returned as two-dimensional data structure with labeled axes. See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. Examples -------- >>> import mars.dataframe as md >>> md.read_csv('data.csv') # doctest: +SKIP >>> # read from HDFS >>> md.read_csv('hdfs://localhost:8020/test.csv') # doctest: +SKIP """ # infer dtypes and columns if isinstance(path, (list, tuple)): file_path = path[0] else: file_path = glob(path)[0] with open_file( file_path, compression=compression, storage_options=storage_options ) as f: if head_lines is not None: b = b"".join([f.readline() for _ in range(head_lines)]) else: head_bytes = int(parse_readable_size(head_bytes)[0]) head_start, head_end = _find_chunk_start_end(f, 0, head_bytes) f.seek(head_start) b = f.read(head_end - head_start) mini_df = pd.read_csv( BytesIO(b), sep=sep, index_col=index_col, dtype=dtype, names=names, header=header, ) if isinstance(mini_df.index, pd.RangeIndex): index_value = parse_index(pd.RangeIndex(-1)) else: index_value = parse_index(mini_df.index) columns_value = parse_index(mini_df.columns, store_data=True) if index_col and not isinstance(index_col, int): index_col = list(mini_df.columns).index(index_col) names = list(mini_df.columns) op = DataFrameReadCSV( path=path, names=names, sep=sep, header=header, index_col=index_col, usecols=usecols, compression=compression, gpu=gpu, incremental_index=incremental_index, use_arrow_dtype=use_arrow_dtype, storage_options=storage_options, **kwargs, ) chunk_bytes = chunk_bytes or options.chunk_store_limit dtypes = mini_df.dtypes if use_arrow_dtype is None: use_arrow_dtype = options.dataframe.use_arrow_dtype if not gpu and use_arrow_dtype: dtypes = to_arrow_dtypes(dtypes, test_df=mini_df) ret = op( index_value=index_value, columns_value=columns_value, dtypes=dtypes, chunk_bytes=chunk_bytes, ) if nrows is not None: return ret.head(nrows) return ret
https://github.com/mars-project/mars/issues/1736
In [20]: d.flag.execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-20-68cd215e82a2> in <module> ----> 1 d.flag.execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 641 642 if wait: --> 643 return run() 644 else: 645 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 637 638 def run(): --> 639 self.data.execute(session, **kw) 640 return self 641 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 377 378 if wait: --> 379 return run() 380 else: 381 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 372 def run(): 373 # no more fetch, thus just fire run --> 374 session.run(self, **kw) 375 # return Tileable or ExecutableTuple itself 376 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 497 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 498 for t in tileables) --> 499 result = self._sess.run(*tileables, **kw) 500 501 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 859 # build chunk graph, tile will be done during building 860 chunk_graph = chunk_graph_builder.build( --> 861 tileables, tileable_graph=tileable_graph) 862 tileable_graph = chunk_graph_builder.prev_tileable_graph 863 temp_result_keys = set(result_keys) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/tiles.py in build(self, tileables, tileable_graph) 346 347 chunk_graph = super().build( --> 348 tileables, tileable_graph=tileable_graph) 349 self._iterative_chunk_graphs.append(chunk_graph) 350 if len(self._interrupted_ops) == 0: ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/tiles.py in build(self, tileables, tileable_graph) 260 # for further execution 261 partial_tiled_chunks = \ --> 262 self._on_tile_failure(tileable_data.op, exc_info) 263 if partial_tiled_chunks is not None and \ 264 len(partial_tiled_chunks) > 0: ~/Workspace/mars/mars/tiles.py in inner(op, exc_info) 299 on_tile_failure(op, exc_info) 300 else: --> 301 raise exc_info[1].with_traceback(exc_info[2]) from None 302 return inner 303 ~/Workspace/mars/mars/tiles.py in build(self, tileables, tileable_graph) 240 continue 241 try: --> 242 tiled = self._tile(tileable_data, tileable_graph) 243 tiled_op.add(tileable_data.op) 244 for t, td in zip(tileable_data.op.outputs, tiled): ~/Workspace/mars/mars/tiles.py in _tile(self, tileable_data, tileable_graph) 335 if any(inp.op in self._interrupted_ops for inp in tileable_data.inputs): 336 raise TilesError('Tile fail due to failure of inputs') --> 337 return super()._tile(tileable_data, tileable_graph) 338 339 @enter_mode(build=True, kernel=True) ~/Workspace/mars/mars/tiles.py in _tile(self, tileable_data, tileable_graph) 199 t._nsplits = o.nsplits 200 elif on_tile is None: --> 201 tds[0]._inplace_tile() 202 else: 203 tds = on_tile(tileable_data.op.outputs, tds) ~/Workspace/mars/mars/core.py in _inplace_tile(self) 166 167 def _inplace_tile(self): --> 168 return handler.inplace_tile(self) 169 170 def __getattr__(self, attr): ~/Workspace/mars/mars/tiles.py in inplace_tile(self, to_tile) 134 if not to_tile.is_coarse(): 135 return to_tile --> 136 dispatched = self.dispatch(to_tile.op) 137 self._assign_to([d.data for d in dispatched], to_tile.op.outputs) 138 return to_tile ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/tiles.py in dispatch(self, op) 117 else: 118 try: --> 119 tiled = op_cls.tile(op) 120 except NotImplementedError as ex: 121 cause = ex ~/Workspace/mars/mars/dataframe/indexing/getitem.py in tile(cls, op) 269 def tile(cls, op): 270 if op.col_names is not None: --> 271 return cls.tile_with_columns(op) 272 else: 273 return cls.tile_with_mask(op) ~/Workspace/mars/mars/dataframe/indexing/getitem.py in tile_with_columns(cls, op) 340 dtype = in_df.dtypes[col_names] 341 for i in range(in_df.chunk_shape[0]): --> 342 c = in_df.cix[(i, column_index)] 343 op = DataFrameIndex(col_names=col_names) 344 out_chunks.append(op.new_chunk([c], shape=(c.shape[0],), index=(i,), dtype=dtype, ~/Workspace/mars/mars/core.py in __getitem__(self, item) 714 indexes = tuple(zip(*itertools.product(*slices))) 715 --> 716 flat_index = np.ravel_multi_index(indexes, self._tileable.chunk_shape) 717 if singleton: 718 return self._tileable._chunks[flat_index[0]] <__array_function__ internals> in ravel_multi_index(*args, **kwargs) ValueError: invalid entry in coordinates array
ValueError
def __call__(self, series, dtype): if dtype is None: inferred_dtype = None if callable(self._arg): # arg is a function, try to inspect the signature sig = inspect.signature(self._arg) return_type = sig.return_annotation if return_type is not inspect._empty: inferred_dtype = np.dtype(return_type) else: try: # try to infer dtype by calling the function inferred_dtype = ( build_series(series) .map(self._arg, na_action=self._na_action) .dtype ) except: # noqa: E722 # nosec pass else: if isinstance(self._arg, MutableMapping): inferred_dtype = pd.Series(self._arg).dtype else: inferred_dtype = self._arg.dtype if inferred_dtype is not None and np.issubdtype(inferred_dtype, np.number): if np.issubdtype(inferred_dtype, np.inexact): # for the inexact e.g. float # we can make the decision, # but for int, due to the nan which may occur, # we cannot infer the dtype dtype = inferred_dtype else: dtype = inferred_dtype if dtype is None: raise ValueError( "cannot infer dtype, it needs to be specified manually for `map`" ) else: dtype = np.int64 if dtype is int else dtype dtype = np.dtype(dtype) inputs = [series] if isinstance(self._arg, SERIES_TYPE): inputs.append(self._arg) return self.new_series( inputs, shape=series.shape, dtype=dtype, index_value=series.index_value, name=series.name, )
def __call__(self, series, dtype): if dtype is None: inferred_dtype = None if callable(self._arg): # arg is a function, try to inspect the signature sig = inspect.signature(self._arg) return_type = sig.return_annotation if return_type is not inspect._empty: inferred_dtype = np.dtype(return_type) else: if isinstance(self._arg, MutableMapping): inferred_dtype = pd.Series(self._arg).dtype else: inferred_dtype = self._arg.dtype if inferred_dtype is not None and np.issubdtype(inferred_dtype, np.number): if np.issubdtype(inferred_dtype, np.inexact): # for the inexact e.g. float # we can make the decision, # but for int, due to the nan which may occur, # we cannot infer the dtype dtype = inferred_dtype else: dtype = inferred_dtype if dtype is None: raise ValueError( "cannot infer dtype, it needs to be specified manually for `map`" ) else: dtype = np.int64 if dtype is int else dtype dtype = np.dtype(dtype) inputs = [series] if isinstance(self._arg, SERIES_TYPE): inputs.append(self._arg) return self.new_series( inputs, shape=series.shape, dtype=dtype, index_value=series.index_value, name=series.name, )
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def tile(cls, op): inp = op.input out = op.outputs[0] if len(inp.chunks) == 1: chunk_op = op.copy().reset_key() chunk_param = out.params chunk_param["index"] = (0,) chunk = chunk_op.new_chunk(inp.chunks, kws=[chunk_param]) new_op = op.copy() param = out.params param["chunks"] = [chunk] param["nsplits"] = ((np.nan,),) return new_op.new_seriess(op.inputs, kws=[param]) inp = Series(inp) if op.dropna: inp = inp.dropna() inp = inp.groupby(inp).count(method=op.method) if op.normalize: if op.convert_index_to_interval: check_chunks_unknown_shape([op.input], TilesError) inp = inp.truediv(op.input.shape[0], axis=0) else: inp = inp.truediv(inp.sum(), axis=0) if op.sort: inp = inp.sort_values(ascending=op.ascending) ret = recursive_tile(inp) chunks = [] for c in ret.chunks: chunk_op = DataFrameValueCounts( convert_index_to_interval=op.convert_index_to_interval, stage=OperandStage.map, ) chunk_params = c.params if op.convert_index_to_interval: # convert index to IntervalDtype chunk_params["index_value"] = parse_index( pd.IntervalIndex([]), c, store_data=False ) chunks.append(chunk_op.new_chunk([c], kws=[chunk_params])) new_op = op.copy() params = out.params params["chunks"] = chunks params["nsplits"] = ret.nsplits return new_op.new_seriess(out.inputs, kws=[params])
def tile(cls, op): inp = op.input out = op.outputs[0] if len(inp.chunks) == 1: chunk_op = op.copy().reset_key() chunk_param = out.params chunk_param["index"] = (0,) chunk = chunk_op.new_chunk(inp.chunks, kws=[chunk_param]) new_op = op.copy() param = out.params param["chunks"] = [chunk] param["nsplits"] = ((np.nan,),) return new_op.new_seriess(op.inputs, kws=[param]) inp = Series(inp) if op.dropna: inp = inp.dropna() inp = inp.groupby(inp).count(method=op.method) if op.normalize: if op.convert_index_to_interval: check_chunks_unknown_shape([op.input], TilesError) inp = inp.truediv(op.input.shape[0], axis=0) else: inp = inp.truediv(inp.sum(), axis=0) if op.sort: inp = inp.sort_values(ascending=op.ascending) ret = recursive_tile(inp) if op.convert_index_to_interval: # convert index to IntervalDtype chunks = [] for c in ret.chunks: chunk_op = DataFrameValueCounts( convert_index_to_interval=True, stage=OperandStage.map ) chunk_params = c.params chunk_params["index_value"] = parse_index( pd.IntervalIndex([]), c, store_data=False ) chunks.append(chunk_op.new_chunk([c], kws=[chunk_params])) new_op = op.copy() params = out.params params["chunks"] = chunks params["nsplits"] = ret.nsplits return new_op.new_seriess(out.inputs, kws=[params]) return [ret]
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def execute(cls, ctx, op: "DataFrameValueCounts"): if op.stage != OperandStage.map: in_data = ctx[op.input.key] if op.convert_index_to_interval: result = in_data.value_counts( normalize=False, sort=op.sort, ascending=op.ascending, bins=op.bins, dropna=op.dropna, ) if op.normalize: result /= in_data.shape[0] else: try: result = in_data.value_counts( normalize=op.normalize, sort=op.sort, ascending=op.ascending, bins=op.bins, dropna=op.dropna, ) except ValueError: in_data = in_data.copy() result = in_data.value_counts( normalize=op.normalize, sort=op.sort, ascending=op.ascending, bins=op.bins, dropna=op.dropna, ) else: result = ctx[op.input.key] # set index name to None to keep consistency with pandas result.index.name = None if op.convert_index_to_interval: # convert CategoricalDtype which generated in `cut` # to IntervalDtype result.index = result.index.astype("interval") ctx[op.outputs[0].key] = result
def execute(cls, ctx, op: "DataFrameValueCounts"): if op.stage != OperandStage.map: in_data = ctx[op.input.key] if op.convert_index_to_interval: result = in_data.value_counts( normalize=False, sort=op.sort, ascending=op.ascending, bins=op.bins, dropna=op.dropna, ) if op.normalize: result /= in_data.shape[0] else: try: result = in_data.value_counts( normalize=op.normalize, sort=op.sort, ascending=op.ascending, bins=op.bins, dropna=op.dropna, ) except ValueError: in_data = in_data.copy() result = in_data.value_counts( normalize=op.normalize, sort=op.sort, ascending=op.ascending, bins=op.bins, dropna=op.dropna, ) else: result = ctx[op.input.key] if op.convert_index_to_interval: # convert CategoricalDtype which generated in `cut` # to IntervalDtype result.index = result.index.astype("interval") ctx[op.outputs[0].key] = result
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def build_mock_groupby(self, **kwargs): in_df = self.inputs[0] if self.is_dataframe_obj: empty_df = build_df(in_df, size=1) obj_dtypes = in_df.dtypes[in_df.dtypes == np.dtype("O")] empty_df[obj_dtypes.index] = "O" else: if in_df.dtype == np.dtype("O"): empty_df = pd.Series( "O", index=pd.RangeIndex(2), name=in_df.name, dtype=np.dtype("O") ) else: empty_df = build_series(in_df, size=1, name=in_df.name) new_kw = self.groupby_params new_kw.update(kwargs) if new_kw.get("level"): new_kw["level"] = 0 if isinstance(new_kw["by"], list): new_by = [] for v in new_kw["by"]: if isinstance(v, (Base, Entity)): new_by.append(build_series(v, size=1, name=v.name)) else: new_by.append(v) new_kw["by"] = new_by return empty_df.groupby(**new_kw)
def build_mock_groupby(self, **kwargs): in_df = self.inputs[0] if self.is_dataframe_obj: empty_df = build_df(in_df, size=2) obj_dtypes = in_df.dtypes[in_df.dtypes == np.dtype("O")] empty_df[obj_dtypes.index] = "O" else: if in_df.dtype == np.dtype("O"): empty_df = pd.Series( "O", index=pd.RangeIndex(2), name=in_df.name, dtype=np.dtype("O") ) else: empty_df = build_series(in_df, size=2, name=in_df.name) new_kw = self.groupby_params new_kw.update(kwargs) if new_kw.get("level"): new_kw["level"] = 0 if isinstance(new_kw["by"], list): new_by = [] for v in new_kw["by"]: if isinstance(v, (Base, Entity)): new_by.append(build_series(v, size=2, name=v.name)) else: new_by.append(v) new_kw["by"] = new_by return empty_df.groupby(**new_kw)
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def __call__(self, df): if self.col_names is not None: # if col_names is a list, return a DataFrame, else return a Series if isinstance(self._col_names, list): dtypes = df.dtypes[self._col_names] columns = parse_index(pd.Index(self._col_names), store_data=True) return self.new_dataframe( [df], shape=(df.shape[0], len(self._col_names)), dtypes=dtypes, index_value=df.index_value, columns_value=columns, ) else: dtype = df.dtypes[self._col_names] return self.new_series( [df], shape=(df.shape[0],), dtype=dtype, index_value=df.index_value, name=self._col_names, ) else: if isinstance(self.mask, (SERIES_TYPE, DATAFRAME_TYPE)): index_value = parse_index( pd.Index( [], dtype=df.index_value.to_pandas().dtype, name=df.index_value.name ), df, self._mask, ) return self.new_dataframe( [df, self._mask], shape=(np.nan, df.shape[1]), dtypes=df.dtypes, index_value=index_value, columns_value=df.columns_value, ) else: index_value = parse_index( pd.Index( [], dtype=df.index_value.to_pandas().dtype, name=df.index_value.name ), df, self._mask, ) return self.new_dataframe( [df], shape=(np.nan, df.shape[1]), dtypes=df.dtypes, index_value=index_value, columns_value=df.columns_value, )
def __call__(self, df): if self.col_names is not None: # if col_names is a list, return a DataFrame, else return a Series if isinstance(self._col_names, list): dtypes = df.dtypes[self._col_names] columns = parse_index(pd.Index(self._col_names), store_data=True) return self.new_dataframe( [df], shape=(df.shape[0], len(self._col_names)), dtypes=dtypes, index_value=df.index_value, columns_value=columns, ) else: dtype = df.dtypes[self._col_names] return self.new_series( [df], shape=(df.shape[0],), dtype=dtype, index_value=df.index_value, name=self._col_names, ) else: if isinstance(self.mask, (SERIES_TYPE, DATAFRAME_TYPE)): index_value = parse_index( pd.Index([], dtype=df.index_value.to_pandas().dtype), df, self._mask ) return self.new_dataframe( [df, self._mask], shape=(np.nan, df.shape[1]), dtypes=df.dtypes, index_value=index_value, columns_value=df.columns_value, ) else: index_value = parse_index( pd.Index([], dtype=df.index_value.to_pandas().dtype), df, self._mask ) return self.new_dataframe( [df], shape=(np.nan, df.shape[1]), dtypes=df.dtypes, index_value=index_value, columns_value=df.columns_value, )
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def set_index(df, keys, drop=True, append=False, inplace=False, verify_integrity=False): op = DataFrameSetIndex( keys=keys, drop=drop, append=append, verify_integrity=verify_integrity, output_types=[OutputType.dataframe], ) result = op(df) if not inplace: return result else: df.data = result.data
def set_index(df, keys, drop=True, append=False, verify_integrity=False, **kw): op = DataFrameSetIndex( keys=keys, drop=drop, append=append, verify_integrity=verify_integrity, output_types=[OutputType.dataframe], **kw, ) return op(df)
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def parse_index(index_value, *args, store_data=False, key=None): from .core import IndexValue def _extract_property(index, tp, ret_data): kw = { "_min_val": _get_index_min(index), "_max_val": _get_index_max(index), "_min_val_close": True, "_max_val_close": True, "_key": key or _tokenize_index(index, *args), } if ret_data: kw["_data"] = index.values for field in tp._FIELDS: if field in kw or field == "_data": continue val = getattr(index, field.lstrip("_"), None) if val is not None: kw[field] = val return kw def _tokenize_index(index, *token_objects): if not index.empty: return tokenize(index) else: return tokenize(index, *token_objects) def _get_index_min(index): try: return index.min() except ValueError: if isinstance(index, pd.IntervalIndex): return None raise except TypeError: return None def _get_index_max(index): try: return index.max() except ValueError: if isinstance(index, pd.IntervalIndex): return None raise except TypeError: return None def _serialize_index(index): tp = getattr(IndexValue, type(index).__name__) properties = _extract_property(index, tp, store_data) properties["_name"] = index.name return tp(**properties) def _serialize_range_index(index): if is_pd_range_empty(index): properties = { "_is_monotonic_increasing": True, "_is_monotonic_decreasing": False, "_is_unique": True, "_min_val": _get_index_min(index), "_max_val": _get_index_max(index), "_min_val_close": True, "_max_val_close": False, "_key": key or _tokenize_index(index, *args), "_name": index.name, "_dtype": index.dtype, } else: properties = _extract_property(index, IndexValue.RangeIndex, False) return IndexValue.RangeIndex( _slice=slice( _get_range_index_start(index), _get_range_index_stop(index), _get_range_index_step(index), ), **properties, ) def _serialize_multi_index(index): kw = _extract_property(index, IndexValue.MultiIndex, store_data) kw["_sortorder"] = index.sortorder kw["_dtypes"] = [lev.dtype for lev in index.levels] return IndexValue.MultiIndex(**kw) if index_value is None: return IndexValue( _index_value=IndexValue.Index( _is_monotonic_increasing=False, _is_monotonic_decreasing=False, _is_unique=False, _min_val=None, _max_val=None, _min_val_close=True, _max_val_close=True, _key=key or tokenize(*args), ) ) if isinstance(index_value, pd.RangeIndex): return IndexValue(_index_value=_serialize_range_index(index_value)) elif isinstance(index_value, pd.MultiIndex): return IndexValue(_index_value=_serialize_multi_index(index_value)) else: return IndexValue(_index_value=_serialize_index(index_value))
def parse_index(index_value, *args, store_data=False, key=None): from .core import IndexValue def _extract_property(index, tp, ret_data): kw = { "_min_val": _get_index_min(index), "_max_val": _get_index_max(index), "_min_val_close": True, "_max_val_close": True, "_key": key or _tokenize_index(index, *args), } if ret_data: kw["_data"] = index.values for field in tp._FIELDS: if field in kw or field == "_data": continue val = getattr(index, field.lstrip("_"), None) if val is not None: kw[field] = val return kw def _tokenize_index(index, *token_objects): if not index.empty: return tokenize(index) else: return tokenize(index, *token_objects) def _get_index_min(index): try: return index.min() except ValueError: if isinstance(index, pd.IntervalIndex): return None raise except TypeError: return None def _get_index_max(index): try: return index.max() except ValueError: if isinstance(index, pd.IntervalIndex): return None raise except TypeError: return None def _serialize_index(index): tp = getattr(IndexValue, type(index).__name__) properties = _extract_property(index, tp, store_data) return tp(**properties) def _serialize_range_index(index): if is_pd_range_empty(index): properties = { "_is_monotonic_increasing": True, "_is_monotonic_decreasing": False, "_is_unique": True, "_min_val": _get_index_min(index), "_max_val": _get_index_max(index), "_min_val_close": True, "_max_val_close": False, "_key": key or _tokenize_index(index, *args), "_name": index.name, "_dtype": index.dtype, } else: properties = _extract_property(index, IndexValue.RangeIndex, False) return IndexValue.RangeIndex( _slice=slice( _get_range_index_start(index), _get_range_index_stop(index), _get_range_index_step(index), ), **properties, ) def _serialize_multi_index(index): kw = _extract_property(index, IndexValue.MultiIndex, store_data) kw["_sortorder"] = index.sortorder kw["_dtypes"] = [lev.dtype for lev in index.levels] return IndexValue.MultiIndex(**kw) if index_value is None: return IndexValue( _index_value=IndexValue.Index( _is_monotonic_increasing=False, _is_monotonic_decreasing=False, _is_unique=False, _min_val=None, _max_val=None, _min_val_close=True, _max_val_close=True, _key=key or tokenize(*args), ) ) if isinstance(index_value, pd.RangeIndex): return IndexValue(_index_value=_serialize_range_index(index_value)) elif isinstance(index_value, pd.MultiIndex): return IndexValue(_index_value=_serialize_multi_index(index_value)) else: return IndexValue(_index_value=_serialize_index(index_value))
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def _serialize_index(index): tp = getattr(IndexValue, type(index).__name__) properties = _extract_property(index, tp, store_data) properties["_name"] = index.name return tp(**properties)
def _serialize_index(index): tp = getattr(IndexValue, type(index).__name__) properties = _extract_property(index, tp, store_data) return tp(**properties)
https://github.com/mars-project/mars/issues/1717
In [4]: import mars.dataframe as md In [5]: md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-5-90507c117e4f> in <module> ----> 1 md.Series(['1-1', '2-2']).map(lambda x: x.split('-')[0]).execute() ~/Workspace/mars/mars/dataframe/base/map.py in map_(series, arg, na_action, dtype) 155 def map_(series, arg, na_action=None, dtype=None): 156 op = DataFrameMap(arg=arg, na_action=na_action) --> 157 return op(series, dtype=dtype) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 449 def _inner(*args, **kwargs): 450 with self: --> 451 return func(*args, **kwargs) 452 453 return _inner ~/Workspace/mars/mars/dataframe/base/map.py in __call__(self, series, dtype) 93 94 if dtype is None: ---> 95 raise ValueError('cannot infer dtype, ' 96 'it needs to be specified manually for `map`') 97 else: ValueError: cannot infer dtype, it needs to be specified manually for `map`
ValueError
def _install(): from ..core import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE from .standardize_range_index import ChunkStandardizeRangeIndex from .string_ import _string_method_to_handlers from .datetimes import _datetime_method_to_handlers from .accessor import StringAccessor, DatetimeAccessor, CachedAccessor for t in DATAFRAME_TYPE: setattr(t, "to_gpu", to_gpu) setattr(t, "to_cpu", to_cpu) setattr(t, "rechunk", rechunk) setattr(t, "describe", describe) setattr(t, "apply", df_apply) setattr(t, "transform", df_transform) setattr(t, "fillna", fillna) setattr(t, "ffill", ffill) setattr(t, "bfill", bfill) setattr(t, "isin", df_isin) setattr(t, "isna", isna) setattr(t, "isnull", isnull) setattr(t, "notna", notna) setattr(t, "notnull", notnull) setattr(t, "dropna", df_dropna) setattr(t, "shift", shift) setattr(t, "tshift", tshift) setattr(t, "diff", df_diff) setattr(t, "astype", astype) setattr(t, "drop", df_drop) setattr(t, "pop", df_pop) setattr( t, "__delitem__", lambda df, items: df_drop(df, items, axis=1, inplace=True) ) setattr(t, "drop_duplicates", df_drop_duplicates) setattr(t, "melt", melt) setattr(t, "memory_usage", df_memory_usage) setattr(t, "select_dtypes", select_dtypes) setattr(t, "map_chunk", map_chunk) setattr(t, "rebalance", rebalance) setattr(t, "stack", stack) setattr(t, "explode", df_explode) for t in SERIES_TYPE: setattr(t, "to_gpu", to_gpu) setattr(t, "to_cpu", to_cpu) setattr(t, "rechunk", rechunk) setattr(t, "map", map_) setattr(t, "describe", describe) setattr(t, "apply", series_apply) setattr(t, "transform", series_transform) setattr(t, "fillna", fillna) setattr(t, "ffill", ffill) setattr(t, "bfill", bfill) setattr(t, "isin", series_isin) setattr(t, "isna", isna) setattr(t, "isnull", isnull) setattr(t, "notna", notna) setattr(t, "notnull", notnull) setattr(t, "dropna", series_dropna) setattr(t, "shift", shift) setattr(t, "tshift", tshift) setattr(t, "diff", series_diff) setattr(t, "value_counts", value_counts) setattr(t, "astype", astype) setattr(t, "drop", series_drop) setattr(t, "drop_duplicates", series_drop_duplicates) setattr(t, "memory_usage", series_memory_usage) setattr(t, "map_chunk", map_chunk) setattr(t, "rebalance", rebalance) setattr(t, "explode", series_explode) for t in INDEX_TYPE: setattr(t, "rechunk", rechunk) setattr(t, "drop", index_drop) setattr(t, "drop_duplicates", index_drop_duplicates) setattr(t, "memory_usage", index_memory_usage) for method in _string_method_to_handlers: if not hasattr(StringAccessor, method): StringAccessor._register(method) for method in _datetime_method_to_handlers: if not hasattr(DatetimeAccessor, method): DatetimeAccessor._register(method) for series in SERIES_TYPE: series.str = CachedAccessor("str", StringAccessor) series.dt = CachedAccessor("dt", DatetimeAccessor)
def _install(): from ..core import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE from .standardize_range_index import ChunkStandardizeRangeIndex from .string_ import _string_method_to_handlers from .datetimes import _datetime_method_to_handlers from .accessor import StringAccessor, DatetimeAccessor, CachedAccessor for t in DATAFRAME_TYPE: setattr(t, "to_gpu", to_gpu) setattr(t, "to_cpu", to_cpu) setattr(t, "rechunk", rechunk) setattr(t, "describe", describe) setattr(t, "apply", df_apply) setattr(t, "transform", df_transform) setattr(t, "fillna", fillna) setattr(t, "ffill", ffill) setattr(t, "bfill", bfill) setattr(t, "isin", df_isin) setattr(t, "isna", isna) setattr(t, "isnull", isnull) setattr(t, "notna", notna) setattr(t, "notnull", notnull) setattr(t, "dropna", df_dropna) setattr(t, "shift", shift) setattr(t, "tshift", tshift) setattr(t, "diff", df_diff) setattr(t, "astype", astype) setattr(t, "drop", df_drop) setattr(t, "pop", df_pop) setattr( t, "__delitem__", lambda df, items: df_drop(df, items, axis=1, inplace=True) ) setattr(t, "drop_duplicates", df_drop_duplicates) setattr(t, "melt", melt) setattr(t, "memory_usage", df_memory_usage) setattr(t, "select_dtypes", select_dtypes) setattr(t, "map_chunk", map_chunk) setattr(t, "rebalance", rebalance) setattr(t, "stack", stack) for t in SERIES_TYPE: setattr(t, "to_gpu", to_gpu) setattr(t, "to_cpu", to_cpu) setattr(t, "rechunk", rechunk) setattr(t, "map", map_) setattr(t, "describe", describe) setattr(t, "apply", series_apply) setattr(t, "transform", series_transform) setattr(t, "fillna", fillna) setattr(t, "ffill", ffill) setattr(t, "bfill", bfill) setattr(t, "isin", series_isin) setattr(t, "isna", isna) setattr(t, "isnull", isnull) setattr(t, "notna", notna) setattr(t, "notnull", notnull) setattr(t, "dropna", series_dropna) setattr(t, "shift", shift) setattr(t, "tshift", tshift) setattr(t, "diff", series_diff) setattr(t, "value_counts", value_counts) setattr(t, "astype", astype) setattr(t, "drop", series_drop) setattr(t, "drop_duplicates", series_drop_duplicates) setattr(t, "memory_usage", series_memory_usage) setattr(t, "map_chunk", map_chunk) setattr(t, "rebalance", rebalance) for t in INDEX_TYPE: setattr(t, "rechunk", rechunk) setattr(t, "drop", index_drop) setattr(t, "drop_duplicates", index_drop_duplicates) setattr(t, "memory_usage", index_memory_usage) for method in _string_method_to_handlers: if not hasattr(StringAccessor, method): StringAccessor._register(method) for method in _datetime_method_to_handlers: if not hasattr(DatetimeAccessor, method): DatetimeAccessor._register(method) for series in SERIES_TYPE: series.str = CachedAccessor("str", StringAccessor) series.dt = CachedAccessor("dt", DatetimeAccessor)
https://github.com/mars-project/mars/issues/1704
In [4]: df.sort_values(by='col1').execute() Out[4]: --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) ~/miniconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj) 700 type_pprinters=self.type_printers, 701 deferred_pprinters=self.deferred_printers) --> 702 printer.pretty(obj) 703 printer.flush() 704 return stream.getvalue() ~/miniconda3/lib/python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj) 392 if cls is not object \ 393 and callable(cls.__dict__.get('__repr__')): --> 394 return _repr_pprint(obj, self, cycle) 395 396 return _default_pprint(obj, self, cycle) ~/miniconda3/lib/python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle) 698 """A pprint that just redirects to the normal repr function.""" 699 # Find newlines and replace them with p.break_() --> 700 output = repr(obj) 701 lines = output.splitlines() 702 with p.group(): ~/Documents/mars_dev/mars/mars/core.py in __repr__(self) 129 130 def __repr__(self): --> 131 return self._data.__repr__() 132 133 def _check_data(self, data): ~/Documents/mars_dev/mars/mars/dataframe/core.py in __repr__(self) 1084 1085 def __repr__(self): -> 1086 return self._to_str(representation=True) 1087 1088 def _repr_html_(self): ~/Documents/mars_dev/mars/mars/dataframe/core.py in _to_str(self, representation) 1057 else: 1058 corner_data = fetch_corner_data( -> 1059 self, session=self._executed_sessions[-1]) 1060 1061 buf = StringIO() ~/Documents/mars_dev/mars/mars/dataframe/utils.py in fetch_corner_data(df_or_series, session) 895 head_data, tail_data = \ 896 ExecutableTuple([head, tail]).fetch(session=session) --> 897 return pd.concat([head_data, tail_data], axis='index') 898 899 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 279 verify_integrity=verify_integrity, 280 copy=copy, --> 281 sort=sort, 282 ) 283 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 358 359 # consolidate --> 360 obj._consolidate(inplace=True) 361 ndims.add(obj.ndim) 362 ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _consolidate(self, inplace) 5363 inplace = validate_bool_kwarg(inplace, "inplace") 5364 if inplace: -> 5365 self._consolidate_inplace() 5366 else: 5367 f = lambda: self._data.consolidate() ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _consolidate_inplace(self) 5345 self._data = self._data.consolidate() 5346 -> 5347 self._protect_consolidate(f) 5348 5349 def _consolidate(self, inplace: bool_t = False): ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _protect_consolidate(self, f) 5333 cache 5334 """ -> 5335 blocks_before = len(self._data.blocks) 5336 result = f() 5337 if len(self._data.blocks) != blocks_before: ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in __getattr__(self, name) 5268 or name in self._accessors 5269 ): -> 5270 return object.__getattribute__(self, name) 5271 else: 5272 if self._info_axis._can_hold_identifiers_and_holds_name(name): AttributeError: 'DataFrame' object has no attribute '_data'
AttributeError
def load(file): header = read_file_header(file) file = open_decompression_file(file, header.compress) try: buf = file.read() finally: if header.compress != CompressType.NONE: file.close() if header.type == SerialType.ARROW: return deserialize(memoryview(buf)) else: return _patch_pandas_mgr(pickle.loads(buf)) # nosec
def load(file): header = read_file_header(file) file = open_decompression_file(file, header.compress) try: buf = file.read() finally: if header.compress != CompressType.NONE: file.close() if header.type == SerialType.ARROW: return pyarrow.deserialize(memoryview(buf), mars_serialize_context()) else: return pickle.loads(buf)
https://github.com/mars-project/mars/issues/1704
In [4]: df.sort_values(by='col1').execute() Out[4]: --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) ~/miniconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj) 700 type_pprinters=self.type_printers, 701 deferred_pprinters=self.deferred_printers) --> 702 printer.pretty(obj) 703 printer.flush() 704 return stream.getvalue() ~/miniconda3/lib/python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj) 392 if cls is not object \ 393 and callable(cls.__dict__.get('__repr__')): --> 394 return _repr_pprint(obj, self, cycle) 395 396 return _default_pprint(obj, self, cycle) ~/miniconda3/lib/python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle) 698 """A pprint that just redirects to the normal repr function.""" 699 # Find newlines and replace them with p.break_() --> 700 output = repr(obj) 701 lines = output.splitlines() 702 with p.group(): ~/Documents/mars_dev/mars/mars/core.py in __repr__(self) 129 130 def __repr__(self): --> 131 return self._data.__repr__() 132 133 def _check_data(self, data): ~/Documents/mars_dev/mars/mars/dataframe/core.py in __repr__(self) 1084 1085 def __repr__(self): -> 1086 return self._to_str(representation=True) 1087 1088 def _repr_html_(self): ~/Documents/mars_dev/mars/mars/dataframe/core.py in _to_str(self, representation) 1057 else: 1058 corner_data = fetch_corner_data( -> 1059 self, session=self._executed_sessions[-1]) 1060 1061 buf = StringIO() ~/Documents/mars_dev/mars/mars/dataframe/utils.py in fetch_corner_data(df_or_series, session) 895 head_data, tail_data = \ 896 ExecutableTuple([head, tail]).fetch(session=session) --> 897 return pd.concat([head_data, tail_data], axis='index') 898 899 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 279 verify_integrity=verify_integrity, 280 copy=copy, --> 281 sort=sort, 282 ) 283 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 358 359 # consolidate --> 360 obj._consolidate(inplace=True) 361 ndims.add(obj.ndim) 362 ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _consolidate(self, inplace) 5363 inplace = validate_bool_kwarg(inplace, "inplace") 5364 if inplace: -> 5365 self._consolidate_inplace() 5366 else: 5367 f = lambda: self._data.consolidate() ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _consolidate_inplace(self) 5345 self._data = self._data.consolidate() 5346 -> 5347 self._protect_consolidate(f) 5348 5349 def _consolidate(self, inplace: bool_t = False): ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _protect_consolidate(self, f) 5333 cache 5334 """ -> 5335 blocks_before = len(self._data.blocks) 5336 result = f() 5337 if len(self._data.blocks) != blocks_before: ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in __getattr__(self, name) 5268 or name in self._accessors 5269 ): -> 5270 return object.__getattribute__(self, name) 5271 else: 5272 if self._info_axis._can_hold_identifiers_and_holds_name(name): AttributeError: 'DataFrame' object has no attribute '_data'
AttributeError
def loads(buf): mv = memoryview(buf) header = read_file_header(mv) compress = header.compress if compress == CompressType.NONE: data = buf[HEADER_LENGTH:] else: data = decompressors[compress](mv[HEADER_LENGTH:]) if header.type == SerialType.ARROW: try: return deserialize(memoryview(data)) except pyarrow.lib.ArrowInvalid: # pragma: no cover # reconstruct value from buffers of arrow components data_view = memoryview(data) meta_block_size = np.frombuffer(data_view[0:4], dtype="int32").item() meta = pickle.loads(data_view[4 : 4 + meta_block_size]) # nosec buffer_sizes = meta.pop("buffer_sizes") bounds = np.cumsum([4 + meta_block_size] + buffer_sizes) meta["data"] = [ pyarrow.py_buffer(data_view[bounds[idx] : bounds[idx + 1]]) for idx in range(len(buffer_sizes)) ] return _patch_pandas_mgr( pyarrow.deserialize_components(meta, mars_serialize_context()) ) else: return _patch_pandas_mgr(pickle.loads(data)) # nosec
def loads(buf): mv = memoryview(buf) header = read_file_header(mv) compress = header.compress if compress == CompressType.NONE: data = buf[HEADER_LENGTH:] else: data = decompressors[compress](mv[HEADER_LENGTH:]) if header.type == SerialType.ARROW: try: return deserialize(memoryview(data)) except pyarrow.lib.ArrowInvalid: # pragma: no cover # reconstruct value from buffers of arrow components data_view = memoryview(data) meta_block_size = np.frombuffer(data_view[0:4], dtype="int32").item() meta = pickle.loads(data_view[4 : 4 + meta_block_size]) # nosec buffer_sizes = meta.pop("buffer_sizes") bounds = np.cumsum([4 + meta_block_size] + buffer_sizes) meta["data"] = [ pyarrow.py_buffer(data_view[bounds[idx] : bounds[idx + 1]]) for idx in range(len(buffer_sizes)) ] return pyarrow.deserialize_components(meta, mars_serialize_context()) else: return pickle.loads(data)
https://github.com/mars-project/mars/issues/1704
In [4]: df.sort_values(by='col1').execute() Out[4]: --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) ~/miniconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj) 700 type_pprinters=self.type_printers, 701 deferred_pprinters=self.deferred_printers) --> 702 printer.pretty(obj) 703 printer.flush() 704 return stream.getvalue() ~/miniconda3/lib/python3.7/site-packages/IPython/lib/pretty.py in pretty(self, obj) 392 if cls is not object \ 393 and callable(cls.__dict__.get('__repr__')): --> 394 return _repr_pprint(obj, self, cycle) 395 396 return _default_pprint(obj, self, cycle) ~/miniconda3/lib/python3.7/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle) 698 """A pprint that just redirects to the normal repr function.""" 699 # Find newlines and replace them with p.break_() --> 700 output = repr(obj) 701 lines = output.splitlines() 702 with p.group(): ~/Documents/mars_dev/mars/mars/core.py in __repr__(self) 129 130 def __repr__(self): --> 131 return self._data.__repr__() 132 133 def _check_data(self, data): ~/Documents/mars_dev/mars/mars/dataframe/core.py in __repr__(self) 1084 1085 def __repr__(self): -> 1086 return self._to_str(representation=True) 1087 1088 def _repr_html_(self): ~/Documents/mars_dev/mars/mars/dataframe/core.py in _to_str(self, representation) 1057 else: 1058 corner_data = fetch_corner_data( -> 1059 self, session=self._executed_sessions[-1]) 1060 1061 buf = StringIO() ~/Documents/mars_dev/mars/mars/dataframe/utils.py in fetch_corner_data(df_or_series, session) 895 head_data, tail_data = \ 896 ExecutableTuple([head, tail]).fetch(session=session) --> 897 return pd.concat([head_data, tail_data], axis='index') 898 899 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 279 verify_integrity=verify_integrity, 280 copy=copy, --> 281 sort=sort, 282 ) 283 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 358 359 # consolidate --> 360 obj._consolidate(inplace=True) 361 ndims.add(obj.ndim) 362 ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _consolidate(self, inplace) 5363 inplace = validate_bool_kwarg(inplace, "inplace") 5364 if inplace: -> 5365 self._consolidate_inplace() 5366 else: 5367 f = lambda: self._data.consolidate() ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _consolidate_inplace(self) 5345 self._data = self._data.consolidate() 5346 -> 5347 self._protect_consolidate(f) 5348 5349 def _consolidate(self, inplace: bool_t = False): ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in _protect_consolidate(self, f) 5333 cache 5334 """ -> 5335 blocks_before = len(self._data.blocks) 5336 result = f() 5337 if len(self._data.blocks) != blocks_before: ~/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py in __getattr__(self, name) 5268 or name in self._accessors 5269 ): -> 5270 return object.__getattribute__(self, name) 5271 else: 5272 if self._info_axis._can_hold_identifiers_and_holds_name(name): AttributeError: 'DataFrame' object has no attribute '_data'
AttributeError
def __init__(self, meta_store=None): self.meta_store = meta_store or RemoteMetaStore.remote()
def __init__(self): self._store = dict()
https://github.com/mars-project/mars/issues/1711
2020-11-17 16:48:29,349 WARNING worker.py:1157 -- Traceback (most recent call last): File "/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py", line 445, in _load_actor_class_from_local actor_class = getattr(module, class_name) AttributeError: module 'mars.ray.core' has no attribute 'RemoteMetaStore' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "python/ray/_raylet.pyx", line 563, in ray._raylet.task_execution_handler File "python/ray/_raylet.pyx", line 567, in ray._raylet.task_execution_handler File "python/ray/_raylet.pyx", line 364, in ray._raylet.execute_task File "/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py", line 394, in load_actor_class job_id, actor_creation_function_descriptor) File "/home/admin/.local/lib/python3.6/site-packages/ray/function_manager.py", line 454, in _load_actor_class_from_local class_name)) RuntimeError: Actor RemoteMetaStore failed to be imported from local code. An unexpected internal error occurred while the worker was executing a task.
AttributeError
def __init__(self, pure_depends=None, axis=None, output_types=None, **kwargs): super().__init__( _pure_depends=pure_depends, _axis=axis, _output_types=output_types, **kwargs )
def __init__(self, prepare_inputs=None, axis=None, output_types=None, **kwargs): super().__init__( _prepare_inputs=prepare_inputs, _axis=axis, _output_types=output_types, **kwargs )
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def standardize_range_index(chunks, axis=0): from .base.standardize_range_index import ChunkStandardizeRangeIndex row_chunks = dict( (k, next(v)) for k, v in itertools.groupby(chunks, key=lambda x: x.index[axis]) ) row_chunks = [row_chunks[i] for i in range(len(row_chunks))] out_chunks = [] for c in chunks: inputs = row_chunks[: c.index[axis]] + [c] op = ChunkStandardizeRangeIndex( pure_depends=[True] * (len(inputs) - 1) + [False], axis=axis, output_types=c.op.output_types, ) out_chunks.append(op.new_chunk(inputs, **c.params.copy())) return out_chunks
def standardize_range_index(chunks, axis=0): from .base.standardize_range_index import ChunkStandardizeRangeIndex row_chunks = dict( (k, next(v)) for k, v in itertools.groupby(chunks, key=lambda x: x.index[axis]) ) row_chunks = [row_chunks[i] for i in range(len(row_chunks))] out_chunks = [] for c in chunks: inputs = row_chunks[: c.index[axis]] + [c] op = ChunkStandardizeRangeIndex( prepare_inputs=[False] * (len(inputs) - 1) + [True], axis=axis, output_types=c.op.output_types, ) out_chunks.append(op.new_chunk(inputs, **c.params.copy())) return out_chunks
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def estimate_size(cls, ctx, op): exec_size = 0 outputs = op.outputs pure_dep_keys = set( inp.key for inp, is_dep in zip(op.inputs or (), op.pure_depends or ()) if is_dep ) if all( not c.is_sparse() and hasattr(c, "nbytes") and not np.isnan(c.nbytes) for c in outputs ): for out in outputs: ctx[out.key] = (out.nbytes, out.nbytes) all_overhead = 0 for inp in op.inputs or (): if inp.key in pure_dep_keys: continue try: if isinstance(inp.op, FetchShuffle): keys_and_shapes = inp.extra_params.get("_shapes", dict()).items() else: keys_and_shapes = [(inp.key, getattr(inp, "shape", None))] # execution size of a specific data chunk may be # larger than stored type due to objects for key, shape in keys_and_shapes: overhead = calc_object_overhead(inp, shape) all_overhead += overhead exec_size += ctx[key][0] + overhead except KeyError: if not op.sparse: inp_size = calc_data_size(inp) if not np.isnan(inp_size): exec_size += inp_size exec_size = int(exec_size) total_out_size = 0 chunk_sizes = dict() for out in outputs: try: if not out.is_sparse(): chunk_size = calc_data_size(out) + all_overhead // len(outputs) else: chunk_size = exec_size if np.isnan(chunk_size): raise TypeError chunk_sizes[out.key] = chunk_size total_out_size += chunk_size except (AttributeError, TypeError, ValueError): pass exec_size = max(exec_size, total_out_size) for out in outputs: if out.key in ctx: continue if out.key in chunk_sizes: store_size = chunk_sizes[out.key] else: store_size = max( exec_size // len(outputs), total_out_size // max(len(chunk_sizes), 1) ) try: if out.is_sparse(): max_sparse_size = ( out.nbytes + np.dtype(np.int64).itemsize * np.prod(out.shape) * out.ndim ) else: max_sparse_size = np.nan except TypeError: # pragma: no cover max_sparse_size = np.nan if not np.isnan(max_sparse_size): store_size = min(store_size, max_sparse_size) ctx[out.key] = (store_size, exec_size // len(outputs))
def estimate_size(cls, ctx, op): exec_size = 0 outputs = op.outputs if all( not c.is_sparse() and hasattr(c, "nbytes") and not np.isnan(c.nbytes) for c in outputs ): for out in outputs: ctx[out.key] = (out.nbytes, out.nbytes) all_overhead = 0 for inp in op.inputs or (): try: if isinstance(inp.op, FetchShuffle): keys_and_shapes = inp.extra_params.get("_shapes", dict()).items() else: keys_and_shapes = [(inp.key, getattr(inp, "shape", None))] # execution size of a specific data chunk may be # larger than stored type due to objects for key, shape in keys_and_shapes: overhead = calc_object_overhead(inp, shape) all_overhead += overhead exec_size += ctx[key][0] + overhead except KeyError: if not op.sparse: inp_size = calc_data_size(inp) if not np.isnan(inp_size): exec_size += inp_size exec_size = int(exec_size) total_out_size = 0 chunk_sizes = dict() for out in outputs: try: if not out.is_sparse(): chunk_size = calc_data_size(out) + all_overhead // len(outputs) else: chunk_size = exec_size if np.isnan(chunk_size): raise TypeError chunk_sizes[out.key] = chunk_size total_out_size += chunk_size except (AttributeError, TypeError, ValueError): pass exec_size = max(exec_size, total_out_size) for out in outputs: if out.key in ctx: continue if out.key in chunk_sizes: store_size = chunk_sizes[out.key] else: store_size = max( exec_size // len(outputs), total_out_size // max(len(chunk_sizes), 1) ) try: if out.is_sparse(): max_sparse_size = ( out.nbytes + np.dtype(np.int64).itemsize * np.prod(out.shape) * out.ndim ) else: max_sparse_size = np.nan except TypeError: # pragma: no cover max_sparse_size = np.nan if not np.isnan(max_sparse_size): store_size = min(store_size, max_sparse_size) ctx[out.key] = (store_size, exec_size // len(outputs))
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def _allocate_resource( self, session_id, op_key, op_info, target_worker=None, reject_workers=None ): """ Allocate resource for single operand :param session_id: session id :param op_key: operand key :param op_info: operand info dict :param target_worker: worker to allocate, can be None :param reject_workers: workers denied to assign to """ if target_worker not in self._worker_metrics: target_worker = None reject_workers = reject_workers or set() op_io_meta = op_info.get("io_meta", {}) try: input_metas = op_io_meta["input_data_metas"] except KeyError: input_metas = self._get_chunks_meta( session_id, op_io_meta.get("input_chunks", {}) ) missing_keys = [k for k, m in input_metas.items() if m is None] if missing_keys: raise DependencyMissing( f"Dependencies {missing_keys!r} missing for operand {op_key}" ) if target_worker is None: input_sizes = dict((k, v.chunk_size) for k, v in input_metas.items()) who_has = dict((k, meta.workers) for k, meta in input_metas.items()) candidate_workers = self._get_eps_by_worker_locality(who_has, input_sizes) else: candidate_workers = [target_worker] candidate_workers = [w for w in candidate_workers if w not in reject_workers] if not candidate_workers: return None, [] # todo make more detailed allocation plans calc_device = op_info.get("calc_device", "cpu") try: mem_usage = self._mem_usage_cache[op_key] except KeyError: pure_dep_keys = set(op_io_meta.get("pure_dep_chunk_keys", ())) mem_usage = self._mem_usage_cache[op_key] = sum( v.chunk_size for k, v in input_metas.items() if k not in pure_dep_keys ) if calc_device == "cpu": alloc_dict = dict(cpu=options.scheduler.default_cpu_usage, mem_quota=mem_usage) elif calc_device == "cuda": alloc_dict = dict( cuda=options.scheduler.default_cuda_usage, mem_quota=mem_usage ) else: # pragma: no cover raise NotImplementedError(f"Calc device {calc_device} not supported") last_assign = self._session_last_assigns.get(session_id, time.time()) timeout_on_fail = time.time() - last_assign > options.scheduler.assign_timeout rejects = [] for worker_ep in candidate_workers: if self._resource_ref.allocate_resource( session_id, op_key, worker_ep, alloc_dict, log_fail=timeout_on_fail ): logger.debug( "Operand %s(%s) allocated to run in %s", op_key, op_info["op_name"], worker_ep, ) self._mem_usage_cache.pop(op_key, None) self.get_actor_ref( BaseOperandActor.gen_uid(session_id, op_key) ).submit_to_worker(worker_ep, input_metas, _tell=True, _wait=False) return worker_ep, rejects else: rejects.append(worker_ep) if timeout_on_fail: running_ops = sum( len(metrics.get("progress", dict()).get(str(session_id), dict())) for metrics in self._worker_metrics.values() ) if running_ops == 0: raise TimeoutError(f"Assign resources to operand {op_key} timed out") else: self._session_last_assigns[session_id] = time.time() return None, rejects
def _allocate_resource( self, session_id, op_key, op_info, target_worker=None, reject_workers=None ): """ Allocate resource for single operand :param session_id: session id :param op_key: operand key :param op_info: operand info dict :param target_worker: worker to allocate, can be None :param reject_workers: workers denied to assign to """ if target_worker not in self._worker_metrics: target_worker = None reject_workers = reject_workers or set() op_io_meta = op_info.get("io_meta", {}) try: input_metas = op_io_meta["input_data_metas"] except KeyError: input_metas = self._get_chunks_meta( session_id, op_io_meta.get("input_chunks", {}) ) missing_keys = [k for k, m in input_metas.items() if m is None] if missing_keys: raise DependencyMissing( f"Dependencies {missing_keys!r} missing for operand {op_key}" ) if target_worker is None: input_sizes = dict((k, v.chunk_size) for k, v in input_metas.items()) who_has = dict((k, meta.workers) for k, meta in input_metas.items()) candidate_workers = self._get_eps_by_worker_locality(who_has, input_sizes) else: candidate_workers = [target_worker] candidate_workers = [w for w in candidate_workers if w not in reject_workers] if not candidate_workers: return None, [] # todo make more detailed allocation plans calc_device = op_info.get("calc_device", "cpu") try: mem_usage = self._mem_usage_cache[op_key] except KeyError: mem_usage = self._mem_usage_cache[op_key] = sum( v.chunk_size for v in input_metas.values() ) if calc_device == "cpu": alloc_dict = dict(cpu=options.scheduler.default_cpu_usage, mem_quota=mem_usage) elif calc_device == "cuda": alloc_dict = dict( cuda=options.scheduler.default_cuda_usage, mem_quota=mem_usage ) else: # pragma: no cover raise NotImplementedError(f"Calc device {calc_device} not supported") last_assign = self._session_last_assigns.get(session_id, time.time()) timeout_on_fail = time.time() - last_assign > options.scheduler.assign_timeout rejects = [] for worker_ep in candidate_workers: if self._resource_ref.allocate_resource( session_id, op_key, worker_ep, alloc_dict, log_fail=timeout_on_fail ): logger.debug( "Operand %s(%s) allocated to run in %s", op_key, op_info["op_name"], worker_ep, ) self._mem_usage_cache.pop(op_key, None) self.get_actor_ref( BaseOperandActor.gen_uid(session_id, op_key) ).submit_to_worker(worker_ep, input_metas, _tell=True, _wait=False) return worker_ep, rejects else: rejects.append(worker_ep) if timeout_on_fail: running_ops = sum( len(metrics.get("progress", dict()).get(str(session_id), dict())) for metrics in self._worker_metrics.values() ) if running_ops == 0: raise TimeoutError(f"Assign resources to operand {op_key} timed out") else: self._session_last_assigns[session_id] = time.time() return None, rejects
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def _collect_operand_io_meta(graph, chunks): # collect operand i/o information predecessor_keys = set() successor_keys = set() input_chunk_keys = set() shared_input_chunk_keys = set() pure_dep_chunk_keys = set() no_prepare_chunk_keys = set() chunk_keys = set() shuffle_keys = dict() predecessors_to_successors = dict() for c in chunks: # handling predecessor args for pn in graph.iter_predecessors(c): if not isinstance(pn.op, Fetch): predecessor_keys.add(pn.op.key) input_chunk_keys.add(pn.key) if graph.count_successors(pn) > 1: shared_input_chunk_keys.add(pn.key) for inp, prep in zip(c.op.inputs or (), c.op.prepare_inputs): if not prep and inp.key in input_chunk_keys: no_prepare_chunk_keys.add(inp.key) for inp, is_dep in zip(c.op.inputs or (), c.op.pure_depends): if is_dep and inp.key in input_chunk_keys: pure_dep_chunk_keys.add(inp.key) # handling successor args for sn in graph.iter_successors(c): successor_keys.add(sn.op.key) if isinstance(c.op, ShuffleProxy): for sn in graph.iter_successors(c): shuffle_keys[sn.op.key] = get_chunk_shuffle_key(sn) if isinstance(c.op, SuccessorsExclusive): for sn in graph.iter_successors(c): predecessors_to_successors[sn.inputs[0].op.key] = sn.op.key chunk_keys.update(co.key for co in c.op.outputs) io_meta = dict( predecessors=list(predecessor_keys), successors=list(successor_keys), input_chunks=list(input_chunk_keys), no_prepare_chunk_keys=list(no_prepare_chunk_keys), pure_dep_chunk_keys=list(pure_dep_chunk_keys), shared_input_chunks=list(shared_input_chunk_keys), chunks=list(chunk_keys), ) if shuffle_keys: io_meta["shuffle_keys"] = [shuffle_keys.get(k) for k in io_meta["successors"]] if predecessors_to_successors: io_meta["predecessors_to_successors"] = predecessors_to_successors return io_meta
def _collect_operand_io_meta(graph, chunks): # collect operand i/o information predecessor_keys = set() successor_keys = set() input_chunk_keys = set() shared_input_chunk_keys = set() no_prepare_chunk_keys = set() chunk_keys = set() shuffle_keys = dict() predecessors_to_successors = dict() for c in chunks: # handling predecessor args for pn in graph.iter_predecessors(c): if not isinstance(pn.op, Fetch): predecessor_keys.add(pn.op.key) input_chunk_keys.add(pn.key) if graph.count_successors(pn) > 1: shared_input_chunk_keys.add(pn.key) for inp, prep in zip(c.op.inputs or (), c.op.prepare_inputs): if not prep and inp.key in input_chunk_keys: no_prepare_chunk_keys.add(inp.key) # handling successor args for sn in graph.iter_successors(c): successor_keys.add(sn.op.key) if isinstance(c.op, ShuffleProxy): for sn in graph.iter_successors(c): shuffle_keys[sn.op.key] = get_chunk_shuffle_key(sn) if isinstance(c.op, SuccessorsExclusive): for sn in graph.iter_successors(c): predecessors_to_successors[sn.inputs[0].op.key] = sn.op.key chunk_keys.update(co.key for co in c.op.outputs) io_meta = dict( predecessors=list(predecessor_keys), successors=list(successor_keys), input_chunks=list(input_chunk_keys), no_prepare_chunk_keys=list(no_prepare_chunk_keys), shared_input_chunks=list(shared_input_chunk_keys), chunks=list(chunk_keys), ) if shuffle_keys: io_meta["shuffle_keys"] = [shuffle_keys.get(k) for k in io_meta["successors"]] if predecessors_to_successors: io_meta["predecessors_to_successors"] = predecessors_to_successors return io_meta
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def _get_keys_to_fetch(graph): from ..operands import Fetch, FetchShuffle fetch_keys = set() exclude_fetch_keys = set() for chunk in graph: if isinstance(chunk.op, Fetch): fetch_keys.add(chunk.op.to_fetch_key or chunk.key) elif isinstance(chunk.op, FetchShuffle): shuffle_key = get_chunk_shuffle_key(graph.successors(chunk)[0]) for k in chunk.op.to_fetch_keys: fetch_keys.add((k, shuffle_key)) else: for inp, prepare_inp, is_dep in zip( chunk.inputs, chunk.op.prepare_inputs, chunk.op.pure_depends ): if not prepare_inp or is_dep: exclude_fetch_keys.add(inp.key) return list(fetch_keys - exclude_fetch_keys)
def _get_keys_to_fetch(graph): from ..operands import Fetch, FetchShuffle fetch_keys = set() exclude_fetch_keys = set() for chunk in graph: if isinstance(chunk.op, Fetch): fetch_keys.add(chunk.op.to_fetch_key or chunk.key) elif isinstance(chunk.op, FetchShuffle): shuffle_key = get_chunk_shuffle_key(graph.successors(chunk)[0]) for k in chunk.op.to_fetch_keys: fetch_keys.add((k, shuffle_key)) else: for inp, prepare_inp in zip(chunk.inputs, chunk.op.prepare_inputs): if not prepare_inp: exclude_fetch_keys.add(inp.key) return list(fetch_keys - exclude_fetch_keys)
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def _calc_results(self, session_id, graph_key, graph, context_dict, chunk_targets): _, op_name = concat_operand_keys(graph, "_") logger.debug("Start calculating operand %s in %s.", graph_key, self.uid) start_time = time.time() for chunk in graph: for inp, prepare_inp, is_dep in zip( chunk.inputs, chunk.op.prepare_inputs, chunk.op.pure_depends ): if not prepare_inp or is_dep: context_dict[inp.key] = None local_context_dict = DistributedDictContext( self.get_scheduler(self.default_uid()), session_id, actor_ctx=self.ctx, address=self.address, n_cpu=self._get_n_cpu(), is_distributed=True, resource_ref=self._resource_ref, ) local_context_dict["_actor_cls"] = type(self) local_context_dict["_actor_uid"] = self.uid local_context_dict["_op_key"] = graph_key local_context_dict.update(context_dict) context_dict.clear() if self._execution_ref: self._execution_ref.deallocate_scheduler_resource( session_id, graph_key, delay=0.5, _tell=True, _wait=False ) # start actual execution executor = Executor(storage=local_context_dict) with EventContext( self._events_ref, EventCategory.PROCEDURE, EventLevel.NORMAL, self._calc_event_type, self.uid, ): self._execution_pool.submit( executor.execute_graph, graph, chunk_targets, retval=False ).result() end_time = time.time() # collect results result_keys = [] result_values, result_sizes, result_shapes = [], [], [] collected_chunk_keys = set() for k in list(local_context_dict.keys()): v = local_context_dict[k] if isinstance(k, tuple): k = tuple(to_str(i) for i in k) else: k = to_str(k) chunk_key = get_chunk_key(k) if chunk_key in chunk_targets: result_keys.append(k) if self._calc_intermediate_device in self._calc_dest_devices: result_values.append(v) result_sizes.append(calc_data_size(v)) else: result_values.append(dataserializer.serialize(v)) result_sizes.append(result_values[-1].total_bytes) result_shapes.append(getattr(v, "shape", None)) collected_chunk_keys.add(chunk_key) local_context_dict.pop(k) # check if all targets generated if any(k not in collected_chunk_keys for k in chunk_targets): raise KeyError([k for k in chunk_targets if k not in collected_chunk_keys]) # adjust sizes in allocation apply_allocs = defaultdict(lambda: 0) for k, size in zip(result_keys, result_sizes): apply_allocs[get_chunk_key(k)] += size apply_alloc_quota_keys, apply_alloc_sizes = [], [] for k, v in apply_allocs.items(): apply_alloc_quota_keys.append( build_quota_key(session_id, k, owner=self.proc_id) ) apply_alloc_sizes.append(v) self._mem_quota_ref.alter_allocations( apply_alloc_quota_keys, apply_alloc_sizes, _tell=True, _wait=False ) self._mem_quota_ref.hold_quotas(apply_alloc_quota_keys, _tell=True) if self._status_ref: self._status_ref.update_mean_stats( "calc_speed." + op_name, sum(apply_alloc_sizes) * 1.0 / (end_time - start_time), _tell=True, _wait=False, ) return self.storage_client.put_objects( session_id, result_keys, result_values, [self._calc_intermediate_device], sizes=result_sizes, shapes=result_shapes, ).then(lambda *_: result_keys)
def _calc_results(self, session_id, graph_key, graph, context_dict, chunk_targets): _, op_name = concat_operand_keys(graph, "_") logger.debug("Start calculating operand %s in %s.", graph_key, self.uid) start_time = time.time() for chunk in graph: for inp, prepare_inp in zip(chunk.inputs, chunk.op.prepare_inputs): if not prepare_inp: context_dict[inp.key] = None local_context_dict = DistributedDictContext( self.get_scheduler(self.default_uid()), session_id, actor_ctx=self.ctx, address=self.address, n_cpu=self._get_n_cpu(), is_distributed=True, resource_ref=self._resource_ref, ) local_context_dict["_actor_cls"] = type(self) local_context_dict["_actor_uid"] = self.uid local_context_dict["_op_key"] = graph_key local_context_dict.update(context_dict) context_dict.clear() if self._execution_ref: self._execution_ref.deallocate_scheduler_resource( session_id, graph_key, delay=0.5, _tell=True, _wait=False ) # start actual execution executor = Executor(storage=local_context_dict) with EventContext( self._events_ref, EventCategory.PROCEDURE, EventLevel.NORMAL, self._calc_event_type, self.uid, ): self._execution_pool.submit( executor.execute_graph, graph, chunk_targets, retval=False ).result() end_time = time.time() # collect results result_keys = [] result_values, result_sizes, result_shapes = [], [], [] collected_chunk_keys = set() for k in list(local_context_dict.keys()): v = local_context_dict[k] if isinstance(k, tuple): k = tuple(to_str(i) for i in k) else: k = to_str(k) chunk_key = get_chunk_key(k) if chunk_key in chunk_targets: result_keys.append(k) if self._calc_intermediate_device in self._calc_dest_devices: result_values.append(v) result_sizes.append(calc_data_size(v)) else: result_values.append(dataserializer.serialize(v)) result_sizes.append(result_values[-1].total_bytes) result_shapes.append(getattr(v, "shape", None)) collected_chunk_keys.add(chunk_key) local_context_dict.pop(k) # check if all targets generated if any(k not in collected_chunk_keys for k in chunk_targets): raise KeyError([k for k in chunk_targets if k not in collected_chunk_keys]) # adjust sizes in allocation apply_allocs = defaultdict(lambda: 0) for k, size in zip(result_keys, result_sizes): apply_allocs[get_chunk_key(k)] += size apply_alloc_quota_keys, apply_alloc_sizes = [], [] for k, v in apply_allocs.items(): apply_alloc_quota_keys.append( build_quota_key(session_id, k, owner=self.proc_id) ) apply_alloc_sizes.append(v) self._mem_quota_ref.alter_allocations( apply_alloc_quota_keys, apply_alloc_sizes, _tell=True, _wait=False ) self._mem_quota_ref.hold_quotas(apply_alloc_quota_keys, _tell=True) if self._status_ref: self._status_ref.update_mean_stats( "calc_speed." + op_name, sum(apply_alloc_sizes) * 1.0 / (end_time - start_time), _tell=True, _wait=False, ) return self.storage_client.put_objects( session_id, result_keys, result_values, [self._calc_intermediate_device], sizes=result_sizes, shapes=result_shapes, ).then(lambda *_: result_keys)
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def __init__( self, graph_serialized, state, chunk_targets=None, data_targets=None, io_meta=None, data_metas=None, mem_request=None, shared_input_chunks=None, pinned_keys=None, mem_overhead_keys=None, est_finish_time=None, calc_actor_uid=None, send_addresses=None, retry_delay=None, finish_callbacks=None, stop_requested=False, calc_device=None, preferred_data_device=None, resource_released=False, no_prepare_chunk_keys=None, pure_dep_chunk_keys=None, ): self.graph_serialized = graph_serialized graph = self.graph = deserialize_graph(graph_serialized) self._state = state self.state_time = time.time() self.data_targets = data_targets or [] self.chunk_targets = chunk_targets or [] self.io_meta = io_meta or dict() self.data_metas = data_metas or dict() self.shared_input_chunks = shared_input_chunks or set() self.mem_request = mem_request or dict() self.pinned_keys = set(pinned_keys or []) self.mem_overhead_keys = set(mem_overhead_keys or []) self.est_finish_time = est_finish_time or time.time() self.calc_actor_uid = calc_actor_uid self.send_addresses = send_addresses self.retry_delay = retry_delay or 0 self.retry_pending = False self.finish_callbacks = finish_callbacks or [] self.stop_requested = stop_requested or False self.calc_device = calc_device self.preferred_data_device = preferred_data_device self.resource_released = resource_released self.no_prepare_chunk_keys = no_prepare_chunk_keys or set() self.pure_dep_chunk_keys = pure_dep_chunk_keys or set() _, self.op_string = concat_operand_keys(graph)
def __init__( self, graph_serialized, state, chunk_targets=None, data_targets=None, io_meta=None, data_metas=None, mem_request=None, shared_input_chunks=None, pinned_keys=None, mem_overhead_keys=None, est_finish_time=None, calc_actor_uid=None, send_addresses=None, retry_delay=None, finish_callbacks=None, stop_requested=False, calc_device=None, preferred_data_device=None, resource_released=False, no_prepare_chunk_keys=None, ): self.graph_serialized = graph_serialized graph = self.graph = deserialize_graph(graph_serialized) self._state = state self.state_time = time.time() self.data_targets = data_targets or [] self.chunk_targets = chunk_targets or [] self.io_meta = io_meta or dict() self.data_metas = data_metas or dict() self.shared_input_chunks = shared_input_chunks or set() self.mem_request = mem_request or dict() self.pinned_keys = set(pinned_keys or []) self.mem_overhead_keys = set(mem_overhead_keys or []) self.est_finish_time = est_finish_time or time.time() self.calc_actor_uid = calc_actor_uid self.send_addresses = send_addresses self.retry_delay = retry_delay or 0 self.retry_pending = False self.finish_callbacks = finish_callbacks or [] self.stop_requested = stop_requested or False self.calc_device = calc_device self.preferred_data_device = preferred_data_device self.resource_released = resource_released self.no_prepare_chunk_keys = no_prepare_chunk_keys or set() _, self.op_string = concat_operand_keys(graph)
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def _prepare_quota_request(self, session_id, graph_key): """ Calculate quota request for an execution graph :param session_id: session id :param graph_key: key of the execution graph :return: allocation dict """ try: graph_record = self._graph_records[(session_id, graph_key)] except KeyError: return None graph = graph_record.graph storage_client = self.storage_client input_data_sizes = dict( (k, v.chunk_size) for k, v in graph_record.data_metas.items() ) alloc_mem_batch = dict() alloc_cache_batch = dict() input_chunk_keys = dict() if self._status_ref: self.estimate_graph_finish_time(session_id, graph_key) if ( graph_record.preferred_data_device == DataStorageDevice.SHARED_MEMORY or graph_record.preferred_data_device == DataStorageDevice.VINEYARD ): # pragma: no cover memory_estimations = self._estimate_calc_memory(session_id, graph_key) else: memory_estimations = dict() graph_record.mem_overhead_keys = set() # collect potential allocation sizes for chunk in graph: op = chunk.op overhead_keys_and_shapes = [] if isinstance(op, Fetch): if ( chunk.key in graph_record.no_prepare_chunk_keys or chunk.key in graph_record.pure_dep_chunk_keys ): continue # use actual size as potential allocation size input_chunk_keys[chunk.key] = input_data_sizes.get( chunk.key ) or calc_data_size(chunk) overhead_keys_and_shapes = [(chunk.key, getattr(chunk, "shape", None))] elif isinstance(op, FetchShuffle): shuffle_key = get_chunk_shuffle_key(graph.successors(chunk)[0]) overhead_keys_and_shapes = chunk.extra_params.get("_shapes", dict()).items() for k in op.to_fetch_keys: part_key = (k, shuffle_key) try: input_chunk_keys[part_key] = input_data_sizes[part_key] except KeyError: pass elif chunk.key in graph_record.chunk_targets: # use estimated size as potential allocation size estimation_data = memory_estimations.get(chunk.key) if not estimation_data: continue quota_key = build_quota_key(session_id, chunk.key, owner=graph_key) cache_batch, alloc_mem_batch[quota_key] = estimation_data if not isinstance(chunk.key, tuple): alloc_cache_batch[chunk.key] = cache_batch for key, shape in overhead_keys_and_shapes: overhead = calc_object_overhead(chunk, shape) if overhead: graph_record.mem_overhead_keys.add(key) quota_key = build_quota_key(session_id, key, owner=graph_key) alloc_mem_batch[quota_key] = overhead keys_to_pin = list(input_chunk_keys.keys()) graph_record.pinned_keys = set() self._pin_shared_data_keys(session_id, graph_key, keys_to_pin) for k, v in input_chunk_keys.items(): quota_key = build_quota_key(session_id, k, owner=graph_key) if quota_key not in alloc_mem_batch: if k in graph_record.pinned_keys or k in graph_record.shared_input_chunks: continue alloc_mem_batch[quota_key] = alloc_mem_batch.get(quota_key, 0) + v if alloc_cache_batch: storage_client.spill_size( sum(alloc_cache_batch.values()), [graph_record.preferred_data_device] ) graph_record.mem_request = alloc_mem_batch or dict() return alloc_mem_batch
def _prepare_quota_request(self, session_id, graph_key): """ Calculate quota request for an execution graph :param session_id: session id :param graph_key: key of the execution graph :return: allocation dict """ try: graph_record = self._graph_records[(session_id, graph_key)] except KeyError: return None graph = graph_record.graph storage_client = self.storage_client input_data_sizes = dict( (k, v.chunk_size) for k, v in graph_record.data_metas.items() ) alloc_mem_batch = dict() alloc_cache_batch = dict() input_chunk_keys = dict() if self._status_ref: self.estimate_graph_finish_time(session_id, graph_key) if ( graph_record.preferred_data_device == DataStorageDevice.SHARED_MEMORY or graph_record.preferred_data_device == DataStorageDevice.VINEYARD ): # pragma: no cover memory_estimations = self._estimate_calc_memory(session_id, graph_key) else: memory_estimations = dict() graph_record.mem_overhead_keys = set() # collect potential allocation sizes for chunk in graph: op = chunk.op overhead_keys_and_shapes = [] if isinstance(op, Fetch): if chunk.key in graph_record.no_prepare_chunk_keys: continue # use actual size as potential allocation size input_chunk_keys[chunk.key] = input_data_sizes.get( chunk.key ) or calc_data_size(chunk) overhead_keys_and_shapes = [(chunk.key, getattr(chunk, "shape", None))] elif isinstance(op, FetchShuffle): shuffle_key = get_chunk_shuffle_key(graph.successors(chunk)[0]) overhead_keys_and_shapes = chunk.extra_params.get("_shapes", dict()).items() for k in op.to_fetch_keys: part_key = (k, shuffle_key) try: input_chunk_keys[part_key] = input_data_sizes[part_key] except KeyError: pass elif chunk.key in graph_record.chunk_targets: # use estimated size as potential allocation size estimation_data = memory_estimations.get(chunk.key) if not estimation_data: continue quota_key = build_quota_key(session_id, chunk.key, owner=graph_key) cache_batch, alloc_mem_batch[quota_key] = estimation_data if not isinstance(chunk.key, tuple): alloc_cache_batch[chunk.key] = cache_batch for key, shape in overhead_keys_and_shapes: overhead = calc_object_overhead(chunk, shape) if overhead: graph_record.mem_overhead_keys.add(key) quota_key = build_quota_key(session_id, key, owner=graph_key) alloc_mem_batch[quota_key] = overhead keys_to_pin = list(input_chunk_keys.keys()) graph_record.pinned_keys = set() self._pin_shared_data_keys(session_id, graph_key, keys_to_pin) for k, v in input_chunk_keys.items(): quota_key = build_quota_key(session_id, k, owner=graph_key) if quota_key not in alloc_mem_batch: if k in graph_record.pinned_keys or k in graph_record.shared_input_chunks: continue alloc_mem_batch[quota_key] = alloc_mem_batch.get(quota_key, 0) + v if alloc_cache_batch: storage_client.spill_size( sum(alloc_cache_batch.values()), [graph_record.preferred_data_device] ) graph_record.mem_request = alloc_mem_batch or dict() return alloc_mem_batch
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def execute_graph( self, session_id, graph_key, graph_ser, io_meta, data_metas, calc_device=None, send_addresses=None, callback=None, ): """ Submit graph to the worker and control the execution :param session_id: session id :param graph_key: graph key :param graph_ser: serialized executable graph :param io_meta: io meta of the chunk :param data_metas: data meta of each input chunk, as a dict :param calc_device: device for calculation, can be 'gpu' or 'cpu' :param send_addresses: targets to send results after execution :param callback: promise callback """ session_graph_key = (session_id, graph_key) callback = callback or [] if not isinstance(callback, list): callback = [callback] try: all_callbacks = self._graph_records[session_graph_key].finish_callbacks or [] self._graph_records[session_graph_key].finish_callbacks.extend(callback) if not self._graph_records[session_graph_key].retry_pending: self._graph_records[session_graph_key].finish_callbacks = ( all_callbacks + callback ) return except KeyError: all_callbacks = [] all_callbacks.extend(callback) calc_device = calc_device or "cpu" if calc_device == "cpu": # pragma: no cover if options.vineyard.socket: preferred_data_device = DataStorageDevice.VINEYARD else: preferred_data_device = DataStorageDevice.SHARED_MEMORY else: preferred_data_device = DataStorageDevice.CUDA # todo change this when handling multiple devices if preferred_data_device == DataStorageDevice.CUDA: slot = self._dispatch_ref.get_slots(calc_device)[0] proc_id = self.ctx.distributor.distribute(slot) preferred_data_device = (proc_id, preferred_data_device) graph_record = self._graph_records[(session_id, graph_key)] = GraphExecutionRecord( graph_ser, ExecutionState.ALLOCATING, io_meta=io_meta, data_metas=data_metas, chunk_targets=io_meta["chunks"], data_targets=list(io_meta.get("data_targets") or io_meta["chunks"]), shared_input_chunks=set(io_meta.get("shared_input_chunks", [])), send_addresses=send_addresses, finish_callbacks=all_callbacks, calc_device=calc_device, preferred_data_device=preferred_data_device, no_prepare_chunk_keys=io_meta.get("no_prepare_chunk_keys") or set(), pure_dep_chunk_keys=io_meta.get("pure_dep_chunk_keys") or set(), ) _, long_op_string = concat_operand_keys(graph_record.graph, decompose=True) if long_op_string != graph_record.op_string: long_op_string = graph_record.op_string + ":" + long_op_string logger.debug( "Worker graph %s(%s) targeting at %r accepted.", graph_key, long_op_string, graph_record.chunk_targets, ) self._update_state(session_id, graph_key, ExecutionState.ALLOCATING) try: del self._result_cache[session_graph_key] except KeyError: pass @log_unhandled def _handle_success(*_): self._invoke_finish_callbacks(session_id, graph_key) @log_unhandled def _handle_rejection(*exc): # some error occurred... logger.debug("Entering _handle_rejection() for graph %s", graph_key) self._dump_execution_states() if graph_record.stop_requested: graph_record.stop_requested = False if not isinstance(exc[1], ExecutionInterrupted): exc = build_exc_info(ExecutionInterrupted) if isinstance(exc[1], ExecutionInterrupted): logger.warning("Execution of graph %s interrupted.", graph_key) else: logger.exception( "Unexpected error occurred in executing graph %s", graph_key, exc_info=exc, ) self._result_cache[(session_id, graph_key)] = GraphResultRecord( *exc, succeeded=False ) self._invoke_finish_callbacks(session_id, graph_key) # collect target data already computed attrs = self.storage_client.get_data_attrs(session_id, graph_record.data_targets) save_attrs = dict((k, v) for k, v in zip(graph_record.data_targets, attrs) if v) # when all target data are computed, report success directly if all(k in save_attrs for k in graph_record.data_targets): logger.debug( "All predecessors of graph %s already computed, call finish directly.", graph_key, ) sizes = dict((k, v.size) for k, v in save_attrs.items()) shapes = dict((k, v.shape) for k, v in save_attrs.items()) self._result_cache[(session_id, graph_key)] = GraphResultRecord(sizes, shapes) _handle_success() else: try: quota_request = self._prepare_quota_request(session_id, graph_key) except PinDataKeyFailed: logger.debug("Failed to pin chunk for graph %s", graph_key) # cannot pin input chunks: retry later retry_delay = graph_record.retry_delay + 0.5 + random.random() graph_record.retry_delay = min(1 + graph_record.retry_delay, 30) graph_record.retry_pending = True self.ref().execute_graph( session_id, graph_key, graph_record.graph_serialized, graph_record.io_meta, graph_record.data_metas, calc_device=calc_device, send_addresses=send_addresses, _tell=True, _delay=retry_delay, ) return promise.finished().then( lambda *_: self._mem_quota_ref.request_batch_quota( quota_request, _promise=True ) if quota_request else None ).then(lambda *_: self._prepare_graph_inputs(session_id, graph_key)).then( lambda *_: self._dispatch_ref.acquire_free_slot(calc_device, _promise=True) ).then(lambda uid: self._send_calc_request(session_id, graph_key, uid)).then( lambda saved_keys: self._store_results(session_id, graph_key, saved_keys) ).then(_handle_success, _handle_rejection)
def execute_graph( self, session_id, graph_key, graph_ser, io_meta, data_metas, calc_device=None, send_addresses=None, callback=None, ): """ Submit graph to the worker and control the execution :param session_id: session id :param graph_key: graph key :param graph_ser: serialized executable graph :param io_meta: io meta of the chunk :param data_metas: data meta of each input chunk, as a dict :param calc_device: device for calculation, can be 'gpu' or 'cpu' :param send_addresses: targets to send results after execution :param callback: promise callback """ session_graph_key = (session_id, graph_key) callback = callback or [] if not isinstance(callback, list): callback = [callback] try: all_callbacks = self._graph_records[session_graph_key].finish_callbacks or [] self._graph_records[session_graph_key].finish_callbacks.extend(callback) if not self._graph_records[session_graph_key].retry_pending: self._graph_records[session_graph_key].finish_callbacks = ( all_callbacks + callback ) return except KeyError: all_callbacks = [] all_callbacks.extend(callback) calc_device = calc_device or "cpu" if calc_device == "cpu": # pragma: no cover if options.vineyard.socket: preferred_data_device = DataStorageDevice.VINEYARD else: preferred_data_device = DataStorageDevice.SHARED_MEMORY else: preferred_data_device = DataStorageDevice.CUDA # todo change this when handling multiple devices if preferred_data_device == DataStorageDevice.CUDA: slot = self._dispatch_ref.get_slots(calc_device)[0] proc_id = self.ctx.distributor.distribute(slot) preferred_data_device = (proc_id, preferred_data_device) graph_record = self._graph_records[(session_id, graph_key)] = GraphExecutionRecord( graph_ser, ExecutionState.ALLOCATING, io_meta=io_meta, data_metas=data_metas, chunk_targets=io_meta["chunks"], data_targets=list(io_meta.get("data_targets") or io_meta["chunks"]), shared_input_chunks=set(io_meta.get("shared_input_chunks", [])), send_addresses=send_addresses, finish_callbacks=all_callbacks, calc_device=calc_device, preferred_data_device=preferred_data_device, no_prepare_chunk_keys=io_meta.get("no_prepare_chunk_keys") or set(), ) _, long_op_string = concat_operand_keys(graph_record.graph, decompose=True) if long_op_string != graph_record.op_string: long_op_string = graph_record.op_string + ":" + long_op_string logger.debug( "Worker graph %s(%s) targeting at %r accepted.", graph_key, long_op_string, graph_record.chunk_targets, ) self._update_state(session_id, graph_key, ExecutionState.ALLOCATING) try: del self._result_cache[session_graph_key] except KeyError: pass @log_unhandled def _handle_success(*_): self._invoke_finish_callbacks(session_id, graph_key) @log_unhandled def _handle_rejection(*exc): # some error occurred... logger.debug("Entering _handle_rejection() for graph %s", graph_key) self._dump_execution_states() if graph_record.stop_requested: graph_record.stop_requested = False if not isinstance(exc[1], ExecutionInterrupted): exc = build_exc_info(ExecutionInterrupted) if isinstance(exc[1], ExecutionInterrupted): logger.warning("Execution of graph %s interrupted.", graph_key) else: logger.exception( "Unexpected error occurred in executing graph %s", graph_key, exc_info=exc, ) self._result_cache[(session_id, graph_key)] = GraphResultRecord( *exc, succeeded=False ) self._invoke_finish_callbacks(session_id, graph_key) # collect target data already computed attrs = self.storage_client.get_data_attrs(session_id, graph_record.data_targets) save_attrs = dict((k, v) for k, v in zip(graph_record.data_targets, attrs) if v) # when all target data are computed, report success directly if all(k in save_attrs for k in graph_record.data_targets): logger.debug( "All predecessors of graph %s already computed, call finish directly.", graph_key, ) sizes = dict((k, v.size) for k, v in save_attrs.items()) shapes = dict((k, v.shape) for k, v in save_attrs.items()) self._result_cache[(session_id, graph_key)] = GraphResultRecord(sizes, shapes) _handle_success() else: try: quota_request = self._prepare_quota_request(session_id, graph_key) except PinDataKeyFailed: logger.debug("Failed to pin chunk for graph %s", graph_key) # cannot pin input chunks: retry later retry_delay = graph_record.retry_delay + 0.5 + random.random() graph_record.retry_delay = min(1 + graph_record.retry_delay, 30) graph_record.retry_pending = True self.ref().execute_graph( session_id, graph_key, graph_record.graph_serialized, graph_record.io_meta, graph_record.data_metas, calc_device=calc_device, send_addresses=send_addresses, _tell=True, _delay=retry_delay, ) return promise.finished().then( lambda *_: self._mem_quota_ref.request_batch_quota( quota_request, _promise=True ) if quota_request else None ).then(lambda *_: self._prepare_graph_inputs(session_id, graph_key)).then( lambda *_: self._dispatch_ref.acquire_free_slot(calc_device, _promise=True) ).then(lambda uid: self._send_calc_request(session_id, graph_key, uid)).then( lambda saved_keys: self._store_results(session_id, graph_key, saved_keys) ).then(_handle_success, _handle_rejection)
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def _prepare_graph_inputs(self, session_id, graph_key): """ Load input data from spilled storage and other workers :param session_id: session id :param graph_key: key of the execution graph """ storage_client = self.storage_client graph_record = self._graph_records[(session_id, graph_key)] graph = graph_record.graph input_metas = graph_record.io_meta.get("input_data_metas", dict()) if graph_record.stop_requested: raise ExecutionInterrupted logger.debug("Start preparing input data for graph %s", graph_key) self._update_state(session_id, graph_key, ExecutionState.PREPARING_INPUTS) prepare_promises = [] input_keys = set() shuffle_keys = set() for chunk in graph: op = chunk.op if isinstance(op, Fetch): if ( chunk.key in graph_record.no_prepare_chunk_keys or chunk.key in graph_record.pure_dep_chunk_keys ): continue input_keys.add(op.to_fetch_key or chunk.key) elif isinstance(op, FetchShuffle): shuffle_key = get_chunk_shuffle_key(graph.successors(chunk)[0]) for input_key in op.to_fetch_keys: part_key = (input_key, shuffle_key) input_keys.add(part_key) shuffle_keys.add(part_key) local_keys = graph_record.pinned_keys & set(input_keys) non_local_keys = [k for k in input_keys if k not in local_keys] non_local_locations = storage_client.get_data_locations(session_id, non_local_keys) copy_keys = set(k for k, loc in zip(non_local_keys, non_local_locations) if loc) remote_keys = [k for k in non_local_keys if k not in copy_keys] # handle local keys self._release_shared_store_quotas(session_id, graph_key, local_keys) # handle move keys prepare_promises.extend(self._prepare_copy_keys(session_id, graph_key, copy_keys)) # handle remote keys prepare_promises.extend( self._prepare_remote_keys(session_id, graph_key, remote_keys, input_metas) ) logger.debug( "Graph key %s: Targets %r, loaded keys %r, copy keys %s, remote keys %r", graph_key, graph_record.chunk_targets, local_keys, copy_keys, remote_keys, ) p = promise.all_(prepare_promises).then( lambda *_: logger.debug("Data preparation for graph %s finished", graph_key) ) return p
def _prepare_graph_inputs(self, session_id, graph_key): """ Load input data from spilled storage and other workers :param session_id: session id :param graph_key: key of the execution graph """ storage_client = self.storage_client graph_record = self._graph_records[(session_id, graph_key)] graph = graph_record.graph input_metas = graph_record.io_meta.get("input_data_metas", dict()) if graph_record.stop_requested: raise ExecutionInterrupted logger.debug("Start preparing input data for graph %s", graph_key) self._update_state(session_id, graph_key, ExecutionState.PREPARING_INPUTS) prepare_promises = [] input_keys = set() shuffle_keys = set() for chunk in graph: op = chunk.op if isinstance(op, Fetch): if chunk.key in graph_record.no_prepare_chunk_keys: continue input_keys.add(op.to_fetch_key or chunk.key) elif isinstance(op, FetchShuffle): shuffle_key = get_chunk_shuffle_key(graph.successors(chunk)[0]) for input_key in op.to_fetch_keys: part_key = (input_key, shuffle_key) input_keys.add(part_key) shuffle_keys.add(part_key) local_keys = graph_record.pinned_keys & set(input_keys) non_local_keys = [k for k in input_keys if k not in local_keys] non_local_locations = storage_client.get_data_locations(session_id, non_local_keys) copy_keys = set(k for k, loc in zip(non_local_keys, non_local_locations) if loc) remote_keys = [k for k in non_local_keys if k not in copy_keys] # handle local keys self._release_shared_store_quotas(session_id, graph_key, local_keys) # handle move keys prepare_promises.extend(self._prepare_copy_keys(session_id, graph_key, copy_keys)) # handle remote keys prepare_promises.extend( self._prepare_remote_keys(session_id, graph_key, remote_keys, input_metas) ) logger.debug( "Graph key %s: Targets %r, loaded keys %r, copy keys %s, remote keys %r", graph_key, graph_record.chunk_targets, local_keys, copy_keys, remote_keys, ) p = promise.all_(prepare_promises).then( lambda *_: logger.debug("Data preparation for graph %s finished", graph_key) ) return p
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def collect_status(self): """ Collect worker status and write to kvstore """ meta_dict = dict() try: if not self._upload_status: return cpu_percent = resource.cpu_percent() disk_io = resource.disk_io_usage() net_io = resource.net_io_usage() if cpu_percent is None or disk_io is None or net_io is None: return hw_metrics = dict() hw_metrics["cpu"] = max(0.0, resource.cpu_count() - cpu_percent / 100.0) hw_metrics["cpu_used"] = cpu_percent / 100.0 hw_metrics["cpu_total"] = resource.cpu_count() cuda_info = resource.cuda_info() if self._with_gpu else None if cuda_info: hw_metrics["cuda"] = cuda_info.gpu_count hw_metrics["cuda_total"] = cuda_info.gpu_count hw_metrics["disk_read"] = disk_io[0] hw_metrics["disk_write"] = disk_io[1] hw_metrics["net_receive"] = net_io[0] hw_metrics["net_send"] = net_io[1] iowait = resource.iowait() if iowait is not None: hw_metrics["iowait"] = iowait mem_stats = resource.virtual_memory() hw_metrics["memory"] = int(mem_stats.available) hw_metrics["memory_used"] = int(mem_stats.used) hw_metrics["memory_total"] = int(mem_stats.total) cache_allocations = self._status_ref.get_cache_allocations() cache_total = cache_allocations.get("total", 0) hw_metrics["cached_total"] = int(cache_total) hw_metrics["cached_hold"] = int(cache_allocations.get("hold", 0)) mem_quota_allocations = self._status_ref.get_mem_quota_allocations() mem_quota_total = mem_quota_allocations.get("total", 0) mem_quota_allocated = mem_quota_allocations.get("allocated", 0) if not mem_quota_allocations: hw_metrics["mem_quota"] = hw_metrics["memory"] hw_metrics["mem_quota_used"] = hw_metrics["memory_used"] hw_metrics["mem_quota_total"] = hw_metrics["memory_total"] hw_metrics["mem_quota_hold"] = 0 else: hw_metrics["mem_quota"] = ( int(mem_quota_total - mem_quota_allocated) or hw_metrics["memory"] ) hw_metrics["mem_quota_used"] = int(mem_quota_allocated) hw_metrics["mem_quota_total"] = int(mem_quota_total) hw_metrics["mem_quota_hold"] = int(mem_quota_allocations.get("hold", 0)) if options.worker.spill_directory: if isinstance(options.worker.spill_directory, str): spill_dirs = options.worker.spill_directory.split(":") else: spill_dirs = options.worker.spill_directory if spill_dirs and "disk_stats" not in hw_metrics: hw_metrics["disk_stats"] = dict() disk_stats = hw_metrics["disk_stats"] agg_disk_used = agg_disk_total = 0.0 agg_inode_used = agg_inode_total = 0 for spill_dir in spill_dirs: if not os.path.exists(spill_dir): continue if spill_dir not in disk_stats: disk_stats[spill_dir] = dict() disk_usage = resource.disk_usage(spill_dir) disk_stats[spill_dir]["disk_total"] = disk_usage.total agg_disk_total += disk_usage.total disk_stats[spill_dir]["disk_used"] = disk_usage.used agg_disk_used += disk_usage.used vfs_stat = os.statvfs(spill_dir) disk_stats[spill_dir]["inode_total"] = vfs_stat.f_files agg_inode_total += vfs_stat.f_files disk_stats[spill_dir]["inode_used"] = ( vfs_stat.f_files - vfs_stat.f_favail ) agg_inode_used += vfs_stat.f_files - vfs_stat.f_favail hw_metrics["disk_used"] = agg_disk_used hw_metrics["disk_total"] = agg_disk_total hw_metrics["inode_used"] = agg_inode_used hw_metrics["inode_total"] = agg_inode_total cuda_card_stats = resource.cuda_card_stats() if self._with_gpu else None if cuda_card_stats: hw_metrics["cuda_stats"] = [ dict( product_name=stat.product_name, gpu_usage=stat.gpu_usage, temperature=stat.temperature, fb_memory_total=stat.fb_mem_info.total, fb_memory_used=stat.fb_mem_info.used, ) for stat in cuda_card_stats ] meta_dict = dict() meta_dict["hardware"] = hw_metrics meta_dict["update_time"] = time.time() meta_dict["stats"] = dict() meta_dict["slots"] = dict() status_data = self._status_ref.get_stats() for k, v in status_data.items(): meta_dict["stats"][k] = v slots_data = self._status_ref.get_slots() for k, v in slots_data.items(): meta_dict["slots"][k] = v meta_dict["progress"] = self._status_ref.get_progress() meta_dict["details"] = gather_node_info() if options.vineyard.socket: # pragma: no cover import vineyard client = vineyard.connect(options.vineyard.socket) meta_dict["vineyard"] = {"instance_id": client.instance_id} self._resource_ref.set_worker_meta(self._endpoint, meta_dict) except Exception as ex: logger.error( "Failed to save status: %s. repr(meta_dict)=%r", str(ex), meta_dict ) finally: self.ref().collect_status(_tell=True, _delay=1)
def collect_status(self): """ Collect worker status and write to kvstore """ meta_dict = dict() try: if not self._upload_status: return cpu_percent = resource.cpu_percent() disk_io = resource.disk_io_usage() net_io = resource.net_io_usage() if cpu_percent is None or disk_io is None or net_io is None: return hw_metrics = dict() hw_metrics["cpu"] = max(0.0, resource.cpu_count() - cpu_percent / 100.0) hw_metrics["cpu_used"] = cpu_percent / 100.0 hw_metrics["cpu_total"] = resource.cpu_count() cuda_info = resource.cuda_info() if self._with_gpu else None if cuda_info: hw_metrics["cuda"] = cuda_info.gpu_count hw_metrics["cuda_total"] = cuda_info.gpu_count hw_metrics["disk_read"] = disk_io[0] hw_metrics["disk_write"] = disk_io[1] hw_metrics["net_receive"] = net_io[0] hw_metrics["net_send"] = net_io[1] iowait = resource.iowait() if iowait is not None: hw_metrics["iowait"] = iowait mem_stats = resource.virtual_memory() hw_metrics["memory"] = int(mem_stats.available) hw_metrics["memory_used"] = int(mem_stats.used) hw_metrics["memory_total"] = int(mem_stats.total) cache_allocations = self._status_ref.get_cache_allocations() cache_total = cache_allocations.get("total", 0) hw_metrics["cached_total"] = int(cache_total) hw_metrics["cached_hold"] = int(cache_allocations.get("hold", 0)) mem_quota_allocations = self._status_ref.get_mem_quota_allocations() mem_quota_total = mem_quota_allocations.get("total", 0) mem_quota_allocated = mem_quota_allocations.get("allocated", 0) if not mem_quota_allocations: hw_metrics["mem_quota"] = hw_metrics["memory"] hw_metrics["mem_quota_used"] = hw_metrics["memory_used"] hw_metrics["mem_quota_total"] = hw_metrics["memory_total"] hw_metrics["mem_quota_hold"] = 0 else: hw_metrics["mem_quota"] = ( int(mem_quota_total - mem_quota_allocated) or hw_metrics["memory"] ) hw_metrics["mem_quota_used"] = int(mem_quota_allocated) hw_metrics["mem_quota_total"] = int(mem_quota_total) hw_metrics["mem_quota_hold"] = int(mem_quota_allocations.get("hold", 0)) if options.worker.spill_directory: if isinstance(options.worker.spill_directory, str): spill_dirs = options.worker.spill_directory.split(":") else: spill_dirs = options.worker.spill_directory if spill_dirs and "disk_stats" not in hw_metrics: hw_metrics["disk_stats"] = dict() disk_stats = hw_metrics["disk_stats"] agg_disk_used = 0.0 agg_disk_total = 0.0 for spill_dir in spill_dirs: if not os.path.exists(spill_dir): continue if spill_dir not in disk_stats: disk_stats[spill_dir] = dict() disk_usage = resource.disk_usage(spill_dir) disk_stats[spill_dir]["disk_total"] = disk_usage.total agg_disk_total += disk_usage.total disk_stats[spill_dir]["disk_used"] = disk_usage.used agg_disk_used += disk_usage.used hw_metrics["disk_used"] = agg_disk_used hw_metrics["disk_total"] = agg_disk_total cuda_card_stats = resource.cuda_card_stats() if self._with_gpu else None if cuda_card_stats: hw_metrics["cuda_stats"] = [ dict( product_name=stat.product_name, gpu_usage=stat.gpu_usage, temperature=stat.temperature, fb_memory_total=stat.fb_mem_info.total, fb_memory_used=stat.fb_mem_info.used, ) for stat in cuda_card_stats ] meta_dict = dict() meta_dict["hardware"] = hw_metrics meta_dict["update_time"] = time.time() meta_dict["stats"] = dict() meta_dict["slots"] = dict() status_data = self._status_ref.get_stats() for k, v in status_data.items(): meta_dict["stats"][k] = v slots_data = self._status_ref.get_slots() for k, v in slots_data.items(): meta_dict["slots"][k] = v meta_dict["progress"] = self._status_ref.get_progress() meta_dict["details"] = gather_node_info() if options.vineyard.socket: # pragma: no cover import vineyard client = vineyard.connect(options.vineyard.socket) meta_dict["vineyard"] = {"instance_id": client.instance_id} self._resource_ref.set_worker_meta(self._endpoint, meta_dict) except Exception as ex: logger.error( "Failed to save status: %s. repr(meta_dict)=%r", str(ex), meta_dict ) finally: self.ref().collect_status(_tell=True, _delay=1)
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def put_objects_by_keys(self, session_id, data_keys, shapes=None, pin_token=None): sizes = [] for data_key in data_keys: buf = None try: buf = self._shared_store.get_buffer(session_id, data_key) size = len(buf) self._internal_put_object(session_id, data_key, buf, size) finally: del buf sizes.append(size) if pin_token: self.pin_data_keys(session_id, data_keys, pin_token) self._finish_put_objects(session_id, data_keys) self.storage_client.register_data( session_id, data_keys, (0, self._storage_device), sizes, shapes=shapes )
def put_objects_by_keys(self, session_id, data_keys, shapes=None, pin_token=None): sizes = [] for data_key in data_keys: buf = None try: buf = self._shared_store.get_buffer(session_id, data_key) size = len(buf) self._internal_put_object(session_id, data_key, buf, size) finally: del buf sizes.append(size) if pin_token: self.pin_data_keys(session_id, data_keys, pin_token) self.storage_client.register_data( session_id, data_keys, (0, self._storage_device), sizes, shapes=shapes )
https://github.com/mars-project/mars/issues/1672
2020-11-02 16:51:59,275 mars.scheduler.operands.common 143 ERROR Attempt 1: Unexpected error ValueError occurred in executing operand 05f71b4ed53f21cea47398b40c0ec61d in 33.19.117.174:21137 Traceback (most recent call last): File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 111, in request_batch_quota make_first=all_allocated, process_quota=process_quota) File "/home/admin/work/turing_dev-pymars-0.6.0a3.zip/mars/worker/quota.py", line 158, in _request_quota raise ValueError(f'Cannot allocate quota size {delta} ' ValueError: Cannot allocate quota size 22002064004 larger than total capacity 21259621171.
ValueError
def execute(cls, ctx, op): def _base_concat(chunk, inputs): # auto generated concat when executing a DataFrame, Series or Index if chunk.op.output_types[0] == OutputType.dataframe: return _auto_concat_dataframe_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.series: return _auto_concat_series_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.index: return _auto_concat_index_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.categorical: return _auto_concat_categorical_chunks(chunk, inputs) else: # pragma: no cover raise TypeError( "Only DataFrameChunk, SeriesChunk, IndexChunk, " "and CategoricalChunk can be automatically concatenated" ) def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index def _auto_concat_categorical_chunks(_, inputs): if len(inputs) == 1: # pragma: no cover return inputs[0] else: # convert categorical into array arrays = [np.asarray(inp) for inp in inputs] array = np.concatenate(arrays) return pd.Categorical( array, categories=inputs[0].categories, ordered=inputs[0].ordered ) chunk = op.outputs[0] inputs = [ctx[input.key] for input in op.inputs] if isinstance(inputs[0], tuple): ctx[chunk.key] = tuple( _base_concat(chunk, [input[i] for input in inputs]) for i in range(len(inputs[0])) ) else: ctx[chunk.key] = _base_concat(chunk, inputs)
def execute(cls, ctx, op): def _base_concat(chunk, inputs): # auto generated concat when executing a DataFrame, Series or Index if chunk.op.output_types[0] == OutputType.dataframe: return _auto_concat_dataframe_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.series: return _auto_concat_series_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.index: return _auto_concat_index_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.categorical: return _auto_concat_categorical_chunks(chunk, inputs) else: # pragma: no cover raise TypeError( "Only DataFrameChunk, SeriesChunk, IndexChunk, " "and CategoricalChunk can be automatically concatenated" ) def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if all(np.isscalar(inp) for inp in inputs): return pd.Series(inputs) else: if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index def _auto_concat_categorical_chunks(_, inputs): if len(inputs) == 1: # pragma: no cover return inputs[0] else: # convert categorical into array arrays = [np.asarray(inp) for inp in inputs] array = np.concatenate(arrays) return pd.Categorical( array, categories=inputs[0].categories, ordered=inputs[0].ordered ) chunk = op.outputs[0] inputs = [ctx[input.key] for input in op.inputs] if isinstance(inputs[0], tuple): ctx[chunk.key] = tuple( _base_concat(chunk, [input[i] for input in inputs]) for i in range(len(inputs[0])) ) else: ctx[chunk.key] = _base_concat(chunk, inputs)
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat
def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if all(np.isscalar(inp) for inp in inputs): return pd.Series(inputs) else: if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _execute_map_with_count(cls, ctx, op, reduction_func=None): # Execution with specified `min_count` in the map stage xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] if isinstance(in_data, pd.Series): count = in_data.count() else: count = in_data.count(axis=op.axis, numeric_only=op.numeric_only) r = cls._execute_reduction(in_data, op, reduction_func=reduction_func) if isinstance(in_data, xdf.Series): if op.output_types[0] == OutputType.series: r = xdf.Series([r]) count = xdf.Series([count]) ctx[op.outputs[0].key] = (r, count) else: # For dataframe, will keep dimensions for intermediate results. ctx[op.outputs[0].key] = ( (xdf.DataFrame(r), xdf.DataFrame(count)) if op.axis == 1 else (xdf.DataFrame(r).transpose(), xdf.DataFrame(count).transpose()) )
def _execute_map_with_count(cls, ctx, op, reduction_func=None): # Execution with specified `min_count` in the map stage xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] if isinstance(in_data, pd.Series): count = in_data.count() else: count = in_data.count(axis=op.axis, numeric_only=op.numeric_only) r = cls._execute_reduction(in_data, op, reduction_func=reduction_func) if isinstance(in_data, xdf.Series): ctx[op.outputs[0].key] = (r, count) else: # For dataframe, will keep dimensions for intermediate results. ctx[op.outputs[0].key] = ( (xdf.DataFrame(r), xdf.DataFrame(count)) if op.axis == 1 else (xdf.DataFrame(r).transpose(), xdf.DataFrame(count).transpose()) )
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _execute_combine_with_count(cls, ctx, op, reduction_func=None): # Execution with specified `min_count` in the combine stage xdf = cudf if op.gpu else pd in_data, concat_count = ctx[op.inputs[0].key] count = concat_count.sum(axis=op.axis) r = cls._execute_reduction(in_data, op, reduction_func=reduction_func) if isinstance(in_data, xdf.Series): if op.output_types[0] == OutputType.series: r = xdf.Series([r]) count = xdf.Series([count]) ctx[op.outputs[0].key] = (r, count) else: # For dataframe, will keep dimensions for intermediate results. ctx[op.outputs[0].key] = ( (xdf.DataFrame(r), xdf.DataFrame(count)) if op.axis == 1 else (xdf.DataFrame(r).transpose(), xdf.DataFrame(count).transpose()) )
def _execute_combine_with_count(cls, ctx, op, reduction_func=None): # Execution with specified `min_count` in the combine stage xdf = cudf if op.gpu else pd in_data, concat_count = ctx[op.inputs[0].key] count = concat_count.sum(axis=op.axis) r = cls._execute_reduction(in_data, op, reduction_func=reduction_func) if isinstance(in_data, xdf.Series): ctx[op.outputs[0].key] = (r, count) else: # For dataframe, will keep dimensions for intermediate results. ctx[op.outputs[0].key] = ( (xdf.DataFrame(r), xdf.DataFrame(count)) if op.axis == 1 else (xdf.DataFrame(r).transpose(), xdf.DataFrame(count).transpose()) )
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _execute_without_count(cls, ctx, op, reduction_func=None): # Execution for normal reduction operands. # For dataframe, will keep dimensions for intermediate results. xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] r = cls._execute_reduction( in_data, op, min_count=op.min_count, reduction_func=reduction_func ) if isinstance(in_data, xdf.Series) or op.output_types[0] == OutputType.series: if op.output_types[0] == OutputType.series and not isinstance(r, xdf.Series): r = xdf.Series([r]) ctx[op.outputs[0].key] = r else: if op.axis == 0: if op.gpu: df = xdf.DataFrame(r).transpose() df.columns = r.index.to_arrow().to_pylist() else: # cannot just do xdf.DataFrame(r).T # cuz the dtype will be object since pandas 1.0 df = xdf.DataFrame(OrderedDict((d, [v]) for d, v in r.iteritems())) else: df = xdf.DataFrame(r) ctx[op.outputs[0].key] = df
def _execute_without_count(cls, ctx, op, reduction_func=None): # Execution for normal reduction operands. # For dataframe, will keep dimensions for intermediate results. xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] r = cls._execute_reduction( in_data, op, min_count=op.min_count, reduction_func=reduction_func ) if isinstance(in_data, xdf.Series) or op.output_types[0] == OutputType.series: ctx[op.outputs[0].key] = r else: if op.axis == 0: if op.gpu: df = xdf.DataFrame(r).transpose() df.columns = r.index.to_arrow().to_pylist() else: # cannot just do xdf.DataFrame(r).T # cuz the dtype will be object since pandas 1.0 df = xdf.DataFrame(OrderedDict((d, [v]) for d, v in r.iteritems())) else: df = xdf.DataFrame(r) ctx[op.outputs[0].key] = df
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _execute_combine(cls, ctx, op): xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] count_sum = in_data.sum(axis=op.axis) if isinstance(in_data, xdf.Series): if op.output_types[0] == OutputType.series and not isinstance( count_sum, xdf.Series ): count_sum = xdf.Series([count_sum]) ctx[op.outputs[0].key] = count_sum else: ctx[op.outputs[0].key] = ( xdf.DataFrame(count_sum) if op.axis == 1 else xdf.DataFrame(count_sum).transpose() )
def _execute_combine(cls, ctx, op): xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] count_sum = in_data.sum(axis=op.axis) if isinstance(in_data, xdf.Series): ctx[op.outputs[0].key] = count_sum else: ctx[op.outputs[0].key] = ( xdf.DataFrame(count_sum) if op.axis == 1 else xdf.DataFrame(count_sum).transpose() )
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _execute_map(cls, ctx, op): xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] if isinstance(in_data, pd.Series): count = in_data.count() else: count = in_data.count(axis=op.axis, numeric_only=op.numeric_only) r = cls._execute_reduction(in_data, op, reduction_func="sum") avg = cls._keep_dim(r / count, op) kwargs = dict(axis=op.axis, skipna=op.skipna) if op.numeric_only: in_data = in_data[avg.columns] avg = avg if np.isscalar(avg) else np.array(avg) var_square = ((in_data.subtract(avg)) ** 2).sum(**kwargs) if isinstance(in_data, xdf.Series): if op.output_types[0] == OutputType.series and not isinstance(r, xdf.Series): r = xdf.Series([r]) count = xdf.Series([count]) var_square = xdf.Series([var_square]) ctx[op.outputs[0].key] = (r, count, var_square) else: ctx[op.outputs[0].key] = tuple( cls._keep_dim(df, op) for df in [r, count, var_square] )
def _execute_map(cls, ctx, op): xdf = cudf if op.gpu else pd in_data = ctx[op.inputs[0].key] if isinstance(in_data, pd.Series): count = in_data.count() else: count = in_data.count(axis=op.axis, numeric_only=op.numeric_only) r = cls._execute_reduction(in_data, op, reduction_func="sum") avg = cls._keep_dim(r / count, op) kwargs = dict(axis=op.axis, skipna=op.skipna) if op.numeric_only: in_data = in_data[avg.columns] avg = avg if np.isscalar(avg) else np.array(avg) var_square = ((in_data.subtract(avg)) ** 2).sum(**kwargs) if isinstance(in_data, xdf.Series): ctx[op.outputs[0].key] = (r, count, var_square) else: ctx[op.outputs[0].key] = tuple( cls._keep_dim(df, op) for df in [r, count, var_square] )
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _execute_combine(cls, ctx, op): data, concat_count, var_square = ctx[op.inputs[0].key] xdf = cudf if op.gpu else pd count = concat_count.sum(axis=op.axis) r = cls._execute_reduction(data, op, reduction_func="sum") avg = cls._keep_dim(r / count, op) avg_diff = data / concat_count - avg kwargs = dict(axis=op.axis, skipna=op.skipna) reduced_var_square = var_square.sum(**kwargs) + (concat_count * avg_diff**2).sum( **kwargs ) if isinstance(data, xdf.Series): if op.output_types[0] == OutputType.series and not isinstance(r, xdf.Series): r = xdf.Series([r]) count = xdf.Series([count]) reduced_var_square = xdf.Series([reduced_var_square]) ctx[op.outputs[0].key] = (r, count, reduced_var_square) else: ctx[op.outputs[0].key] = tuple( cls._keep_dim(df, op) for df in [r, count, reduced_var_square] )
def _execute_combine(cls, ctx, op): data, concat_count, var_square = ctx[op.inputs[0].key] xdf = cudf if op.gpu else pd count = concat_count.sum(axis=op.axis) r = cls._execute_reduction(data, op, reduction_func="sum") avg = cls._keep_dim(r / count, op) avg_diff = data / concat_count - avg kwargs = dict(axis=op.axis, skipna=op.skipna) reduced_var_square = var_square.sum(**kwargs) + (concat_count * avg_diff**2).sum( **kwargs ) if isinstance(data, xdf.Series): ctx[op.outputs[0].key] = (r, count, reduced_var_square) else: ctx[op.outputs[0].key] = tuple( cls._keep_dim(df, op) for df in [r, count, reduced_var_square] )
https://github.com/mars-project/mars/issues/1682
In [1]: import mars.dataframe as md In [2]: from datetime import datetime In [3]: s = md.Series([datetime.now(), datetime.now(), datetime.now()], chunk_si ...: ze=2) In [4]: s.max().execute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-4-8891e4a12063> in <module> ----> 1 s.max().execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 638 639 if wait: --> 640 return run() 641 else: 642 thread_executor = ThreadPoolExecutor(1) ~/Workspace/mars/mars/core.py in run() 634 635 def run(): --> 636 self.data.execute(session, **kw) 637 return self 638 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 374 375 if wait: --> 376 return run() 377 else: 378 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 369 def run(): 370 # no more fetch, thus just fire run --> 371 session.run(self, **kw) 372 # return Tileable or ExecutableTuple itself 373 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/dataframe/merge/concat.py in execute(cls, ctx, op) 290 for i in range(len(inputs[0]))) 291 else: --> 292 ctx[chunk.key] = _base_concat(chunk, inputs) 293 294 @classmethod ~/Workspace/mars/mars/dataframe/merge/concat.py in _base_concat(chunk, inputs) 193 return _auto_concat_dataframe_chunks(chunk, inputs) 194 elif chunk.op.output_types[0] == OutputType.series: --> 195 return _auto_concat_series_chunks(chunk, inputs) 196 elif chunk.op.output_types[0] == OutputType.index: 197 return _auto_concat_index_chunks(chunk, inputs) ~/Workspace/mars/mars/dataframe/merge/concat.py in _auto_concat_series_chunks(chunk, inputs) 256 concat = xdf.concat(inputs, axis=chunk.op.axis) 257 else: --> 258 concat = xdf.concat(inputs) 259 if getattr(chunk.index_value, 'should_be_monotonic', False): 260 concat.sort_index(inplace=True) ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy) 282 verify_integrity=verify_integrity, 283 copy=copy, --> 284 sort=sort, 285 ) 286 ~/miniconda3/lib/python3.7/site-packages/pandas/core/reshape/concat.py in __init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort) 357 "only Series and DataFrame objs are valid" 358 ) --> 359 raise TypeError(msg) 360 361 # consolidate TypeError: cannot concatenate object of type '<class 'pandas._libs.tslibs.timestamps.Timestamp'>'; only Series and DataFrame objs are valid
TypeError
def _tile_with_tensor(cls, op): out = op.outputs[0] axis = op.axis rhs_is_tensor = isinstance(op.rhs, TENSOR_TYPE) tensor, other = (op.rhs, op.lhs) if rhs_is_tensor else (op.lhs, op.rhs) if tensor.shape == other.shape: tensor = tensor.rechunk(other.nsplits)._inplace_tile() else: # shape differs only when dataframe add 1-d tensor, we need rechunk on columns axis. if op.axis in ["columns", 1] and other.ndim == 1: # force axis == 0 if it's Series other than DataFrame axis = 0 rechunk_size = ( other.nsplits[1] if axis == "columns" or axis == 1 else other.nsplits[0] ) if tensor.ndim > 0: tensor = tensor.rechunk((rechunk_size,))._inplace_tile() cum_splits = [0] + np.cumsum(other.nsplits[axis]).tolist() out_chunks = [] for out_index in itertools.product(*(map(range, other.chunk_shape))): tensor_chunk = tensor.cix[out_index[: tensor.ndim]] other_chunk = other.cix[out_index] out_op = op.copy().reset_key() inputs = ( [other_chunk, tensor_chunk] if rhs_is_tensor else [tensor_chunk, other_chunk] ) if isinstance(other_chunk, DATAFRAME_CHUNK_TYPE): start = cum_splits[out_index[axis]] end = cum_splits[out_index[axis] + 1] chunk_dtypes = out.dtypes.iloc[start:end] out_chunk = out_op.new_chunk( inputs, shape=other_chunk.shape, index=other_chunk.index, dtypes=chunk_dtypes, index_value=other_chunk.index_value, columns_value=other.columns_value, ) else: out_chunk = out_op.new_chunk( inputs, shape=other_chunk.shape, index=other_chunk.index, dtype=out.dtype, index_value=other_chunk.index_value, name=other_chunk.name, ) out_chunks.append(out_chunk) new_op = op.copy() if isinstance(other, SERIES_TYPE): return new_op.new_seriess( op.inputs, other.shape, nsplits=other.nsplits, dtype=out.dtype, index_value=other.index_value, chunks=out_chunks, ) else: return new_op.new_dataframes( op.inputs, other.shape, nsplits=other.nsplits, dtypes=out.dtypes, index_value=other.index_value, columns_value=other.columns_value, chunks=out_chunks, )
def _tile_with_tensor(cls, op): rhs_is_tensor = isinstance(op.rhs, TENSOR_TYPE) tensor, other = (op.rhs, op.lhs) if rhs_is_tensor else (op.lhs, op.rhs) if tensor.shape == other.shape: tensor = tensor.rechunk(other.nsplits)._inplace_tile() else: # shape differs only when dataframe add 1-d tensor, we need rechunk on columns axis. rechunk_size = ( other.nsplits[1] if op.axis == "columns" or op.axis == 1 else other.nsplits[0] ) if tensor.ndim > 0: tensor = tensor.rechunk((rechunk_size,))._inplace_tile() out_chunks = [] for out_index in itertools.product(*(map(range, other.chunk_shape))): tensor_chunk = tensor.cix[out_index[: tensor.ndim]] other_chunk = other.cix[out_index] out_op = op.copy().reset_key() inputs = ( [other_chunk, tensor_chunk] if rhs_is_tensor else [tensor_chunk, other_chunk] ) if isinstance(other_chunk, DATAFRAME_CHUNK_TYPE): out_chunk = out_op.new_chunk( inputs, shape=other_chunk.shape, index=other_chunk.index, dtypes=other_chunk.dtypes, index_value=other_chunk.index_value, columns_value=other.columns_value, ) else: out_chunk = out_op.new_chunk( inputs, shape=other_chunk.shape, index=other_chunk.index, dtype=other_chunk.dtype, index_value=other_chunk.index_value, name=other_chunk.name, ) out_chunks.append(out_chunk) new_op = op.copy() out = op.outputs[0] if isinstance(other, SERIES_TYPE): return new_op.new_seriess( op.inputs, other.shape, nsplits=other.nsplits, dtype=out.dtype, index_value=other.index_value, chunks=out_chunks, ) else: return new_op.new_dataframes( op.inputs, other.shape, nsplits=other.nsplits, dtypes=out.dtypes, index_value=other.index_value, columns_value=other.columns_value, chunks=out_chunks, )
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def execute(cls, ctx, op): if len(op.inputs) == 2: df, other = ctx[op.inputs[0].key], ctx[op.inputs[1].key] if isinstance(op.inputs[0], SERIES_CHUNK_TYPE) and isinstance( op.inputs[1], DATAFRAME_CHUNK_TYPE ): df, other = other, df func_name = getattr(cls, "_rfunc_name") else: func_name = getattr(cls, "_func_name") elif pd.api.types.is_scalar(op.lhs) or isinstance(op.lhs, np.ndarray): df = ctx[op.rhs.key] other = op.lhs func_name = getattr(cls, "_rfunc_name") else: df = ctx[op.lhs.key] other = op.rhs func_name = getattr(cls, "_func_name") if df.ndim == 2: kw = dict({"axis": op.axis}) else: kw = dict() if op.fill_value is not None: # comparison function like eq does not have `fill_value` kw["fill_value"] = op.fill_value if op.level is not None: # logical function like and may don't have `level` (for Series type) kw["level"] = op.level if hasattr(other, "ndim") and other.ndim == 0: other = other.item() ctx[op.outputs[0].key] = getattr(df, func_name)(other, **kw)
def execute(cls, ctx, op): if len(op.inputs) == 2: df, other = ctx[op.inputs[0].key], ctx[op.inputs[1].key] if isinstance(op.inputs[0], SERIES_CHUNK_TYPE) and isinstance( op.inputs[1], DATAFRAME_CHUNK_TYPE ): df, other = other, df func_name = getattr(cls, "_rfunc_name") else: func_name = getattr(cls, "_func_name") elif pd.api.types.is_scalar(op.lhs) or isinstance(op.lhs, np.ndarray): df = ctx[op.rhs.key] other = op.lhs func_name = getattr(cls, "_rfunc_name") else: df = ctx[op.lhs.key] other = op.rhs func_name = getattr(cls, "_func_name") if df.ndim == 2: kw = dict({"axis": op.axis}) else: kw = dict() if op.fill_value is not None: # comparison function like eq does not have `fill_value` kw["fill_value"] = op.fill_value if op.level is not None: # logical function like and may don't have `level` (for Series type) kw["level"] = op.level ctx[op.outputs[0].key] = getattr(df, func_name)(other, **kw)
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def _calc_properties(cls, x1, x2=None, axis="columns"): if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): if x2 is None: dtypes = x1.dtypes elif pd.api.types.is_scalar(x2): dtypes = cls._operator(build_empty_df(x1.dtypes), x2).dtypes elif x1.dtypes is not None and isinstance(x2, TENSOR_TYPE): dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) else: dtypes = x1.dtypes return { "shape": x1.shape, "dtypes": dtypes, "columns_value": x1.columns_value, "index_value": x1.index_value, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): x2_dtype = x2.dtype if hasattr(x2, "dtype") else type(x2) dtype = infer_dtype(x1.dtype, np.dtype(x2_dtype), cls._operator) ret = {"shape": x1.shape, "dtype": dtype, "index_value": x1.index_value} if pd.api.types.is_scalar(x2) or (hasattr(x2, "ndim") and x2.ndim == 0): ret["name"] = x1.name return ret if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE) ): index_shape, column_shape, dtypes, columns, index = ( np.nan, np.nan, None, None, None, ) if ( x1.columns_value is not None and x2.columns_value is not None and x1.columns_value.key == x2.columns_value.key ): dtypes = pd.Series( [ infer_dtype(dt1, dt2, cls._operator) for dt1, dt2 in zip(x1.dtypes, x2.dtypes) ], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) elif x1.dtypes is not None and x2.dtypes is not None: dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator) columns = parse_index(dtypes.index, store_data=True) columns.value.should_be_monotonic = True column_shape = len(dtypes) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): if axis == "columns" or axis == 1: index_shape = x1.shape[0] index = x1.index_value column_shape, dtypes, columns = np.nan, None, None if x1.columns_value is not None and x1.index_value is not None: if x1.columns_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) else: # pragma: no cover dtypes = x1.dtypes # FIXME columns = infer_index_value(x1.columns_value, x2.index_value) columns.value.should_be_monotonic = True column_shape = np.nan else: assert axis == "index" or axis == 0 column_shape = x1.shape[1] columns = x1.columns_value dtypes = x1.dtypes index_shape, index = np.nan, None if x1.index_value is not None and x1.index_value is not None: if x1.index_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: if x1.dtypes is not None: dtypes = pd.Series( [ infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes ], index=x1.dtypes.index, ) index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True index_shape = np.nan return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): index_shape, dtype, index = np.nan, None, None dtype = infer_dtype(x1.dtype, x2.dtype, cls._operator) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) ret = {"shape": (index_shape,), "dtype": dtype, "index_value": index} if x1.name == x2.name: ret["name"] = x1.name return ret raise NotImplementedError("Unknown combination of parameters")
def _calc_properties(cls, x1, x2=None, axis="columns"): if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): if x2 is None: dtypes = x1.dtypes elif pd.api.types.is_scalar(x2): dtypes = cls._operator(build_empty_df(x1.dtypes), x2).dtypes elif x1.dtypes is not None and isinstance(x2, TENSOR_TYPE): dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) else: dtypes = x1.dtypes return { "shape": x1.shape, "dtypes": dtypes, "columns_value": x1.columns_value, "index_value": x1.index_value, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): x2_dtype = x2.dtype if hasattr(x2, "dtype") else type(x2) dtype = infer_dtype(x1.dtype, np.dtype(x2_dtype), cls._operator) return {"shape": x1.shape, "dtype": dtype, "index_value": x1.index_value} if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE) ): index_shape, column_shape, dtypes, columns, index = ( np.nan, np.nan, None, None, None, ) if ( x1.columns_value is not None and x2.columns_value is not None and x1.columns_value.key == x2.columns_value.key ): dtypes = pd.Series( [ infer_dtype(dt1, dt2, cls._operator) for dt1, dt2 in zip(x1.dtypes, x2.dtypes) ], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) elif x1.dtypes is not None and x2.dtypes is not None: dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator) columns = parse_index(dtypes.index, store_data=True) columns.value.should_be_monotonic = True column_shape = len(dtypes) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): if axis == "columns" or axis == 1: index_shape = x1.shape[0] index = x1.index_value column_shape, dtypes, columns = np.nan, None, None if x1.columns_value is not None and x1.index_value is not None: if x1.columns_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) else: # pragma: no cover dtypes = x1.dtypes # FIXME columns = infer_index_value(x1.columns_value, x2.index_value) columns.value.should_be_monotonic = True column_shape = np.nan else: assert axis == "index" or axis == 0 column_shape = x1.shape[1] columns = x1.columns_value dtypes = x1.dtypes index_shape, index = np.nan, None if x1.index_value is not None and x1.index_value is not None: if x1.index_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: if x1.dtypes is not None: dtypes = pd.Series( [ infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes ], index=x1.dtypes.index, ) index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True index_shape = np.nan return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): index_shape, dtype, index = np.nan, None, None dtype = infer_dtype(x1.dtype, x2.dtype, cls._operator) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return {"shape": (index_shape,), "dtype": dtype, "index_value": index} raise NotImplementedError("Unknown combination of parameters")
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def execute(cls, ctx, op): def _base_concat(chunk, inputs): # auto generated concat when executing a DataFrame, Series or Index if chunk.op.output_types[0] == OutputType.dataframe: return _auto_concat_dataframe_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.series: return _auto_concat_series_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.index: return _auto_concat_index_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.categorical: return _auto_concat_categorical_chunks(chunk, inputs) else: # pragma: no cover raise TypeError( "Only DataFrameChunk, SeriesChunk, IndexChunk, " "and CategoricalChunk can be automatically concatenated" ) def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if all(np.isscalar(inp) for inp in inputs): return pd.Series(inputs) else: if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index def _auto_concat_categorical_chunks(_, inputs): if len(inputs) == 1: # pragma: no cover return inputs[0] else: # convert categorical into array arrays = [np.asarray(inp) for inp in inputs] array = np.concatenate(arrays) return pd.Categorical( array, categories=inputs[0].categories, ordered=inputs[0].ordered ) chunk = op.outputs[0] inputs = [ctx[input.key] for input in op.inputs] if isinstance(inputs[0], tuple): ctx[chunk.key] = tuple( _base_concat(chunk, [input[i] for input in inputs]) for i in range(len(inputs[0])) ) else: ctx[chunk.key] = _base_concat(chunk, inputs)
def execute(cls, ctx, op): def _base_concat(chunk, inputs): # auto generated concat when executing a DataFrame, Series or Index if chunk.op.output_types[0] == OutputType.dataframe: return _auto_concat_dataframe_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.series: return _auto_concat_series_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.index: return _auto_concat_index_chunks(chunk, inputs) elif chunk.op.output_types[0] == OutputType.categorical: return _auto_concat_categorical_chunks(chunk, inputs) else: # pragma: no cover raise TypeError( "Only DataFrameChunk, SeriesChunk, IndexChunk, " "and CategoricalChunk can be automatically concatenated" ) def _auto_concat_dataframe_chunks(chunk, inputs): xdf = pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) else cudf if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if all(np.isscalar(inp) for inp in inputs): return pd.Series(inputs) else: if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index def _auto_concat_categorical_chunks(_, inputs): if len(inputs) == 1: # pragma: no cover return inputs[0] else: # convert categorical into array arrays = [np.asarray(inp) for inp in inputs] array = np.concatenate(arrays) return pd.Categorical( array, categories=inputs[0].categories, ordered=inputs[0].ordered ) chunk = op.outputs[0] inputs = [ctx[input.key] for input in op.inputs] if isinstance(inputs[0], tuple): ctx[chunk.key] = tuple( _base_concat(chunk, [input[i] for input in inputs]) for i in range(len(inputs[0])) ) else: ctx[chunk.key] = _base_concat(chunk, inputs)
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def _auto_concat_dataframe_chunks(chunk, inputs): xdf = ( pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) or cudf is None else cudf ) if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret
def _auto_concat_dataframe_chunks(chunk, inputs): xdf = pd if isinstance(inputs[0], (pd.DataFrame, pd.Series)) else cudf if chunk.op.axis is not None: return xdf.concat(inputs, axis=op.axis) # auto generated concat when executing a DataFrame if len(inputs) == 1: ret = inputs[0] else: max_rows = max(inp.index[0] for inp in chunk.inputs) min_rows = min(inp.index[0] for inp in chunk.inputs) n_rows = max_rows - min_rows + 1 n_cols = int(len(inputs) // n_rows) assert n_rows * n_cols == len(inputs) concats = [] for i in range(n_rows): if n_cols == 1: concats.append(inputs[i]) else: concat = xdf.concat( [inputs[i * n_cols + j] for j in range(n_cols)], axis=1 ) concats.append(concat) if xdf is pd: # The `sort=False` is to suppress a `FutureWarning` of pandas, # when the index or column of chunks to concatenate is not aligned, # which may happens for certain ops. # # See also Note [Columns of Left Join] in test_merge_execution.py. ret = xdf.concat(concats, sort=False) else: ret = xdf.concat(concats) # cuDF will lost index name when concat two seriess. ret.index.name = concats[0].index.name if getattr(chunk.index_value, "should_be_monotonic", False): ret.sort_index(inplace=True) if getattr(chunk.columns_value, "should_be_monotonic", False): ret.sort_index(axis=1, inplace=True) return ret
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if all(np.isscalar(inp) for inp in inputs): return pd.Series(inputs) else: if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) or cudf is None else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat
def _auto_concat_series_chunks(chunk, inputs): # auto generated concat when executing a Series if all(np.isscalar(inp) for inp in inputs): return pd.Series(inputs) else: if len(inputs) == 1: concat = inputs[0] else: xdf = pd if isinstance(inputs[0], pd.Series) else cudf if chunk.op.axis is not None: concat = xdf.concat(inputs, axis=chunk.op.axis) else: concat = xdf.concat(inputs) if getattr(chunk.index_value, "should_be_monotonic", False): concat.sort_index(inplace=True) return concat
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) or cudf is None else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index
def _auto_concat_index_chunks(chunk, inputs): if len(inputs) == 1: xdf = pd if isinstance(inputs[0], pd.Index) else cudf concat_df = xdf.DataFrame(index=inputs[0]) else: xdf = pd if isinstance(inputs[0], pd.Index) else cudf empty_dfs = [xdf.DataFrame(index=inp) for inp in inputs] concat_df = xdf.concat(empty_dfs, axis=0) if getattr(chunk.index_value, "should_be_monotonic", False): concat_df.sort_index(inplace=True) return concat_df.index
https://github.com/mars-project/mars/issues/1674
In [9]: df = md.DataFrame({'a': [1, 2, 3], 'b': [1.1, 2.2, 3.3], ...: 'c': [datetime(2020, 1, 1), datetime.now(), datetime(2000, 3, 3, 11, 22, 23)]}) In[10]: df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() Traceback (most recent call last): File "/Users/qinxuye/miniconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-10-d9c9c3710287>", line 1, in <module> df[(df['c'] > md.to_datetime('2020-08-01')) &amp; (df['c'] < md.to_datetime('2020-11-01'))].head().execute() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 640, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 636, in run self.data.execute(session, **kw) File "/Users/qinxuye/Workspace/mars/mars/core.py", line 376, in execute return run() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 371, in run session.run(self, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 500, in run result = self._sess.run(*tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/session.py", line 108, in run res = self._executor.execute_tileables(tileables, **kw) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/executor.py", line 861, in execute_tileables tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 201, in _tile tds[0]._inplace_tile() File "/Users/qinxuye/Workspace/mars/mars/core.py", line 165, in _inplace_tile return handler.inplace_tile(self) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 136, in inplace_tile dispatched = self.dispatch(to_tile.op) File "/Users/qinxuye/Workspace/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/qinxuye/Workspace/mars/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 261, in tile return cls._tile_with_tensor(op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/arithmetic/core.py", line 216, in _tile_with_tensor rechunk_size = other.nsplits[1] if op.axis == 'columns' or op.axis == 1 else other.nsplits[0] IndexError: tuple index out of range
IndexError
def get_output_types(*objs, unknown_as=None): output_types = [] for obj in objs: if obj is None: continue elif isinstance(obj, (FuseChunk, FuseChunkData)): obj = obj.chunk try: output_types.append(_get_output_type_by_cls(type(obj))) except TypeError: if unknown_as is not None: output_types.append(unknown_as) else: # pragma: no cover raise return output_types
def get_output_types(*objs, unknown_as=None): output_types = [] for obj in objs: if obj is None: continue for tp in OutputType.__members__.values(): try: tileable_types = _OUTPUT_TYPE_TO_TILEABLE_TYPES[tp] chunk_types = _OUTPUT_TYPE_TO_CHUNK_TYPES[tp] if isinstance(obj, (tileable_types, chunk_types)): output_types.append(tp) break except KeyError: continue else: if unknown_as is not None: output_types.append(unknown_as) else: # pragma: no cover raise TypeError("Output can only be tensor, dataframe or series") return output_types
https://github.com/mars-project/mars/issues/1664
TypeError Traceback (most recent call last) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~\AppData\Roaming\Python\Python37\site-packages\mars\graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 643 exc_info = sys.exc_info() --> 644 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 645 f'value {value}, reason: {exc_info[1]}') \ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Failed to set field `extra_params` for Chunk <op=DataFrameFetch, key=08ec6fd4af751d9f5dcec87ea3a6dde3> with value {'order': <TensorOrder.C_ORDER: 'C'>, 'dtype': dtype('<U'), '_i': 0}, reason: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: ExecutionFailed Traceback (most recent call last) <ipython-input-41-4dd810a40cfc> in <module> ----> 1 fsr_st.to_csv("fsr_st03.csv",encoding="utf8").execute() ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~\AppData\Roaming\Python\Python37\site-packages\mars\session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in run(self, *tileables, **kw) 212 timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval 213 try: --> 214 if self._check_response_finished(graph_url, timeout_val): 215 break 216 except KeyboardInterrupt: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in _check_response_finished(self, graph_url, timeout) 172 exc_info = pickle.loads(base64.b64decode(resp_json['exc_info'])) 173 exc = exc_info[1].with_traceback(exc_info[2]) --> 174 raise ExecutionFailed('Graph execution failed.') from exc 175 else: 176 raise ExecutionFailed('Graph execution failed with unknown reason.') ExecutionFailed: 'Graph execution failed.'
TypeError
def get_fetch_op_cls(self, obj): output_types = get_output_types(obj, unknown_as=OutputType.object) fetch_cls, fetch_shuffle_cls = get_fetch_class(output_types[0]) if isinstance(self, ShuffleProxy): cls = fetch_shuffle_cls else: cls = fetch_cls def _inner(**kw): return cls(output_types=output_types, **kw) return _inner
def get_fetch_op_cls(self, obj): raise NotImplementedError
https://github.com/mars-project/mars/issues/1664
TypeError Traceback (most recent call last) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~\AppData\Roaming\Python\Python37\site-packages\mars\graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 643 exc_info = sys.exc_info() --> 644 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 645 f'value {value}, reason: {exc_info[1]}') \ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Failed to set field `extra_params` for Chunk <op=DataFrameFetch, key=08ec6fd4af751d9f5dcec87ea3a6dde3> with value {'order': <TensorOrder.C_ORDER: 'C'>, 'dtype': dtype('<U'), '_i': 0}, reason: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: ExecutionFailed Traceback (most recent call last) <ipython-input-41-4dd810a40cfc> in <module> ----> 1 fsr_st.to_csv("fsr_st03.csv",encoding="utf8").execute() ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~\AppData\Roaming\Python\Python37\site-packages\mars\session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in run(self, *tileables, **kw) 212 timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval 213 try: --> 214 if self._check_response_finished(graph_url, timeout_val): 215 break 216 except KeyboardInterrupt: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in _check_response_finished(self, graph_url, timeout) 172 exc_info = pickle.loads(base64.b64decode(resp_json['exc_info'])) 173 exc = exc_info[1].with_traceback(exc_info[2]) --> 174 raise ExecutionFailed('Graph execution failed.') from exc 175 else: 176 raise ExecutionFailed('Graph execution failed with unknown reason.') ExecutionFailed: 'Graph execution failed.'
TypeError
def __init__(self, to_fetch_key=None, **kw): kw.pop("output_types", None) kw.pop("_output_types", None) super().__init__(_to_fetch_key=to_fetch_key, **kw)
def __init__(self, to_fetch_key=None, **kw): super().__init__(_to_fetch_key=to_fetch_key, **kw)
https://github.com/mars-project/mars/issues/1664
TypeError Traceback (most recent call last) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~\AppData\Roaming\Python\Python37\site-packages\mars\graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 643 exc_info = sys.exc_info() --> 644 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 645 f'value {value}, reason: {exc_info[1]}') \ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Failed to set field `extra_params` for Chunk <op=DataFrameFetch, key=08ec6fd4af751d9f5dcec87ea3a6dde3> with value {'order': <TensorOrder.C_ORDER: 'C'>, 'dtype': dtype('<U'), '_i': 0}, reason: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: ExecutionFailed Traceback (most recent call last) <ipython-input-41-4dd810a40cfc> in <module> ----> 1 fsr_st.to_csv("fsr_st03.csv",encoding="utf8").execute() ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~\AppData\Roaming\Python\Python37\site-packages\mars\session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in run(self, *tileables, **kw) 212 timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval 213 try: --> 214 if self._check_response_finished(graph_url, timeout_val): 215 break 216 except KeyboardInterrupt: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in _check_response_finished(self, graph_url, timeout) 172 exc_info = pickle.loads(base64.b64decode(resp_json['exc_info'])) 173 exc = exc_info[1].with_traceback(exc_info[2]) --> 174 raise ExecutionFailed('Graph execution failed.') from exc 175 else: 176 raise ExecutionFailed('Graph execution failed with unknown reason.') ExecutionFailed: 'Graph execution failed.'
TypeError
def get_fetch_op_cls(self, obj): output_types = get_output_types(obj, unknown_as=OutputType.object) fetch_cls, fetch_shuffle_cls = get_fetch_class(output_types[0]) if isinstance(self, ShuffleProxy): cls = fetch_shuffle_cls else: cls = fetch_cls def _inner(**kw): return cls(output_types=output_types, **kw) return _inner
def get_fetch_op_cls(self, obj): return ObjectFetch
https://github.com/mars-project/mars/issues/1664
TypeError Traceback (most recent call last) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~\AppData\Roaming\Python\Python37\site-packages\mars\graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 643 exc_info = sys.exc_info() --> 644 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 645 f'value {value}, reason: {exc_info[1]}') \ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Failed to set field `extra_params` for Chunk <op=DataFrameFetch, key=08ec6fd4af751d9f5dcec87ea3a6dde3> with value {'order': <TensorOrder.C_ORDER: 'C'>, 'dtype': dtype('<U'), '_i': 0}, reason: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: ExecutionFailed Traceback (most recent call last) <ipython-input-41-4dd810a40cfc> in <module> ----> 1 fsr_st.to_csv("fsr_st03.csv",encoding="utf8").execute() ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~\AppData\Roaming\Python\Python37\site-packages\mars\session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in run(self, *tileables, **kw) 212 timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval 213 try: --> 214 if self._check_response_finished(graph_url, timeout_val): 215 break 216 except KeyboardInterrupt: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in _check_response_finished(self, graph_url, timeout) 172 exc_info = pickle.loads(base64.b64decode(resp_json['exc_info'])) 173 exc = exc_info[1].with_traceback(exc_info[2]) --> 174 raise ExecutionFailed('Graph execution failed.') from exc 175 else: 176 raise ExecutionFailed('Graph execution failed with unknown reason.') ExecutionFailed: 'Graph execution failed.'
TypeError
def __init__(self, dtype=None, to_fetch_key=None, sparse=False, **kw): kw.pop("output_types", None) kw.pop("_output_types", None) super().__init__(_dtype=dtype, _to_fetch_key=to_fetch_key, _sparse=sparse, **kw)
def __init__(self, dtype=None, to_fetch_key=None, sparse=False, **kw): super().__init__(_dtype=dtype, _to_fetch_key=to_fetch_key, _sparse=sparse, **kw)
https://github.com/mars-project/mars/issues/1664
TypeError Traceback (most recent call last) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~\AppData\Roaming\Python\Python37\site-packages\mars\graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 643 exc_info = sys.exc_info() --> 644 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 645 f'value {value}, reason: {exc_info[1]}') \ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Failed to set field `extra_params` for Chunk <op=DataFrameFetch, key=08ec6fd4af751d9f5dcec87ea3a6dde3> with value {'order': <TensorOrder.C_ORDER: 'C'>, 'dtype': dtype('<U'), '_i': 0}, reason: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: ExecutionFailed Traceback (most recent call last) <ipython-input-41-4dd810a40cfc> in <module> ----> 1 fsr_st.to_csv("fsr_st03.csv",encoding="utf8").execute() ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~\AppData\Roaming\Python\Python37\site-packages\mars\session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in run(self, *tileables, **kw) 212 timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval 213 try: --> 214 if self._check_response_finished(graph_url, timeout_val): 215 break 216 except KeyboardInterrupt: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in _check_response_finished(self, graph_url, timeout) 172 exc_info = pickle.loads(base64.b64decode(resp_json['exc_info'])) 173 exc = exc_info[1].with_traceback(exc_info[2]) --> 174 raise ExecutionFailed('Graph execution failed.') from exc 175 else: 176 raise ExecutionFailed('Graph execution failed with unknown reason.') ExecutionFailed: 'Graph execution failed.'
TypeError
def __init__(self, dtype=None, to_fetch_keys=None, to_fetch_idxes=None, **kw): kw.pop("output_types", None) kw.pop("_output_types", None) super().__init__( _dtype=dtype, _to_fetch_keys=to_fetch_keys, _to_fetch_idxes=to_fetch_idxes, **kw )
def __init__(self, dtype=None, to_fetch_keys=None, to_fetch_idxes=None, **kw): super().__init__( _dtype=dtype, _to_fetch_keys=to_fetch_keys, _to_fetch_idxes=to_fetch_idxes, **kw )
https://github.com/mars-project/mars/issues/1664
TypeError Traceback (most recent call last) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~\AppData\Roaming\Python\Python37\site-packages\mars\graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Provider.serialize_model() 797 cpdef serialize_model(self, model_instance, obj=None): --> 798 if obj is None: 799 obj = model_instance.cls(self)() ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 643 exc_info = sys.exc_info() --> 644 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 645 f'value {value}, reason: {exc_info[1]}') \ ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 640 try: --> 641 self._set_value(value, field_obj, field.type, weak_ref=field.weak_ref) 642 except TypeError: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 485 # dict type --> 486 self._set_dict(<dict>value, obj, tp, weak_ref=weak_ref) 487 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_dict() 407 value_obj = obj.dict.values.value.add() --> 408 self._set_value(v, value_obj, tp=tp.value_type if tp is not None else tp) 409 ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 557 if tp is None: --> 558 cls._set_untyped_value(value, obj, weak_ref=weak_ref) 559 else: ~\AppData\Roaming\Python\Python37\site-packages\mars\serialize\pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_untyped_value() 553 else: --> 554 raise TypeError(f'Unknown type to serialize: {type(value)}') 555 TypeError: Failed to set field `extra_params` for Chunk <op=DataFrameFetch, key=08ec6fd4af751d9f5dcec87ea3a6dde3> with value {'order': <TensorOrder.C_ORDER: 'C'>, 'dtype': dtype('<U'), '_i': 0}, reason: Unknown type to serialize: <enum 'TensorOrder'> The above exception was the direct cause of the following exception: ExecutionFailed Traceback (most recent call last) <ipython-input-41-4dd810a40cfc> in <module> ----> 1 fsr_st.to_csv("fsr_st03.csv",encoding="utf8").execute() ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~\AppData\Roaming\Python\Python37\site-packages\mars\core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~\AppData\Roaming\Python\Python37\site-packages\mars\session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in run(self, *tileables, **kw) 212 timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval 213 try: --> 214 if self._check_response_finished(graph_url, timeout_val): 215 break 216 except KeyboardInterrupt: ~\AppData\Roaming\Python\Python37\site-packages\mars\web\session.py in _check_response_finished(self, graph_url, timeout) 172 exc_info = pickle.loads(base64.b64decode(resp_json['exc_info'])) 173 exc = exc_info[1].with_traceback(exc_info[2]) --> 174 raise ExecutionFailed('Graph execution failed.') from exc 175 else: 176 raise ExecutionFailed('Graph execution failed with unknown reason.') ExecutionFailed: 'Graph execution failed.'
TypeError
def _tile_dataframe(cls, op): from ..indexing.iloc import DataFrameIlocGetItem out_df = op.outputs[0] inputs = op.inputs check_chunks_unknown_shape(inputs, TilesError) normalized_nsplits = ( {1: inputs[0].nsplits[1]} if op.axis == 0 else {0: inputs[0].nsplits[0]} ) inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] out_chunks = [] nsplits = [] cum_index = 0 for df in inputs: for c in df.chunks: if op.axis == 0: index = (c.index[0] + cum_index, c.index[1]) else: index = (c.index[0], c.index[1] + cum_index) iloc_op = DataFrameIlocGetItem(indexes=(slice(None),) * 2) out_chunks.append( iloc_op.new_chunk( [c], shape=c.shape, index=index, dtypes=c.dtypes, index_value=c.index_value, columns_value=c.columns_value, ) ) nsplits.extend(df.nsplits[op.axis]) cum_index += len(df.nsplits[op.axis]) out_nsplits = ( (tuple(nsplits), inputs[0].nsplits[1]) if op.axis == 0 else (inputs[0].nsplits[0], tuple(nsplits)) ) if op.ignore_index: out_chunks = standardize_range_index(out_chunks) new_op = op.copy() return new_op.new_dataframes( op.inputs, out_df.shape, nsplits=out_nsplits, chunks=out_chunks, dtypes=out_df.dtypes, index_value=out_df.index_value, columns_value=out_df.columns_value, )
def _tile_dataframe(cls, op): from ..indexing.iloc import DataFrameIlocGetItem out_df = op.outputs[0] inputs = op.inputs normalized_nsplits = ( {1: inputs[0].nsplits[1]} if op.axis == 0 else {0: inputs[0].nsplits[0]} ) inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] out_chunks = [] nsplits = [] cum_index = 0 for df in inputs: for c in df.chunks: if op.axis == 0: index = (c.index[0] + cum_index, c.index[1]) else: index = (c.index[0], c.index[1] + cum_index) iloc_op = DataFrameIlocGetItem(indexes=(slice(None),) * 2) out_chunks.append( iloc_op.new_chunk( [c], shape=c.shape, index=index, dtypes=c.dtypes, index_value=c.index_value, columns_value=c.columns_value, ) ) nsplits.extend(df.nsplits[op.axis]) cum_index += len(df.nsplits[op.axis]) out_nsplits = ( (tuple(nsplits), inputs[0].nsplits[1]) if op.axis == 0 else (inputs[0].nsplits[0], tuple(nsplits)) ) if op.ignore_index: out_chunks = standardize_range_index(out_chunks) new_op = op.copy() return new_op.new_dataframes( op.inputs, out_df.shape, nsplits=out_nsplits, chunks=out_chunks, dtypes=out_df.dtypes, index_value=out_df.index_value, columns_value=out_df.columns_value, )
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def _tile_series(cls, op): from ..indexing.iloc import SeriesIlocGetItem out = op.outputs[0] inputs = op.inputs out_chunks = [] if op.axis == 1: check_chunks_unknown_shape(inputs, TilesError) inputs = [item.rechunk(op.inputs[0].nsplits)._inplace_tile() for item in inputs] cum_index = 0 nsplits = [] for series in inputs: for c in series.chunks: if op.axis == 0: index = (c.index[0] + cum_index,) shape = c.shape else: index = (c.index[0], cum_index) shape = (c.shape[0], 1) iloc_op = SeriesIlocGetItem(indexes=(slice(None),)) out_chunks.append( iloc_op.new_chunk( [c], shape=shape, index=index, index_value=c.index_value, dtype=c.dtype, name=c.name, ) ) if op.axis == 0: nsplits.extend(series.nsplits[0]) cum_index += len(series.nsplits[op.axis]) else: nsplits.append(1) cum_index += 1 if op.ignore_index: out_chunks = standardize_range_index(out_chunks) new_op = op.copy() if op.axis == 0: nsplits = (tuple(nsplits),) return new_op.new_seriess( op.inputs, out.shape, nsplits=nsplits, chunks=out_chunks, dtype=out.dtype, index_value=out.index_value, name=out.name, ) else: nsplits = (inputs[0].nsplits[0], tuple(nsplits)) return new_op.new_dataframes( op.inputs, out.shape, nsplits=nsplits, chunks=out_chunks, dtypes=out.dtypes, index_value=out.index_value, columns_value=out.columns_value, )
def _tile_series(cls, op): from ..indexing.iloc import SeriesIlocGetItem out = op.outputs[0] inputs = op.inputs out_chunks = [] if op.axis == 1: inputs = [item.rechunk(op.inputs[0].nsplits)._inplace_tile() for item in inputs] cum_index = 0 nsplits = [] for series in inputs: for c in series.chunks: if op.axis == 0: index = (c.index[0] + cum_index,) shape = c.shape else: index = (c.index[0], cum_index) shape = (c.shape[0], 1) iloc_op = SeriesIlocGetItem(indexes=(slice(None),)) out_chunks.append( iloc_op.new_chunk( [c], shape=shape, index=index, index_value=c.index_value, dtype=c.dtype, name=c.name, ) ) if op.axis == 0: nsplits.extend(series.nsplits[0]) cum_index += len(series.nsplits[op.axis]) else: nsplits.append(1) cum_index += 1 if op.ignore_index: out_chunks = standardize_range_index(out_chunks) new_op = op.copy() if op.axis == 0: nsplits = (tuple(nsplits),) return new_op.new_seriess( op.inputs, out.shape, nsplits=nsplits, chunks=out_chunks, dtype=out.dtype, index_value=out.index_value, name=out.name, ) else: nsplits = (inputs[0].nsplits[0], tuple(nsplits)) return new_op.new_dataframes( op.inputs, out.shape, nsplits=nsplits, chunks=out_chunks, dtypes=out.dtypes, index_value=out.index_value, columns_value=out.columns_value, )
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def watch_workers(self): from kubernetes import client, config cls = type(self) if os.environ.get("KUBE_API_ADDRESS"): # pragma: no cover k8s_config = client.Configuration() k8s_config.host = os.environ["KUBE_API_ADDRESS"] else: k8s_config = config.load_incluster_config() watcher = self.watcher_cls(k8s_config, os.environ["MARS_K8S_POD_NAMESPACE"]) for workers in watcher.watch_workers(): # pragma: no branch if not cls._watcher_running: # pragma: no cover break if self._resource_ref is None: self.set_schedulers(self._cluster_info_ref.get_schedulers()) self._resource_ref = self.get_actor_ref(ResourceActor.default_uid()) if self._resource_ref: # pragma: no branch self._resource_ref.mark_workers_alive(workers)
def watch_workers(self): from kubernetes import client, config cls = type(self) worker_set = set() workers_from_resource = set() if os.environ.get("KUBE_API_ADDRESS"): # pragma: no cover k8s_config = client.Configuration() k8s_config.host = os.environ["KUBE_API_ADDRESS"] else: k8s_config = config.load_incluster_config() watcher = self.watcher_cls(k8s_config, os.environ["MARS_K8S_POD_NAMESPACE"]) for workers in watcher.watch_workers(): # pragma: no branch if not cls._watcher_running: # pragma: no cover break if self._resource_ref is None: self.set_schedulers(self._cluster_info_ref.get_schedulers()) self._resource_ref = self.get_actor_ref(ResourceActor.default_uid()) if self._resource_ref is not None: # pragma: no branch workers_from_resource = set(self._resource_ref.get_worker_endpoints()) removed = (worker_set - set(workers)) or ( worker_set - set(workers_from_resource) ) if removed: logger.debug("Remove of workers %r detected by kubernetes.", removed) if self._resource_ref: # pragma: no branch self._resource_ref.detach_dead_workers( list(removed), _tell=True, _wait=False ) worker_set = set(workers)
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def tile(cls, op): tensor = op.tensor pk = op.pk out = op.outputs[0] index_path = op.index_path ctx = get_context() fs = None if index_path is not None: fs = get_fs(index_path, op.storage_options) # check index_path for distributed if getattr(ctx, "running_mode", None) == RunningMode.distributed: if index_path is not None: if isinstance(fs, LocalFileSystem): raise ValueError( "`index_path` cannot be local file dir " "for distributed index building" ) if index_path is not None: # check if the index path is empty try: files = [f for f in fs.ls(index_path) if "proxima_" in f] if files: raise ValueError( f"Directory {index_path} contains built proxima index, " f"clean them to perform new index building" ) except FileNotFoundError: # if not exist, create directory fs.mkdir(index_path) # make sure all inputs have known chunk sizes check_chunks_unknown_shape(op.inputs, TilesError) nsplit = decide_unify_split(tensor.nsplits[0], pk.nsplits[0]) if op.topk is not None: nsplit = cls._get_atleast_topk_nsplit(nsplit, op.topk) if tensor.chunk_shape[1] > 1: tensor = tensor.rechunk({0: nsplit, 1: tensor.shape[1]})._inplace_tile() else: tensor = tensor.rechunk({0: nsplit})._inplace_tile() pk = pk.rechunk({0: nsplit})._inplace_tile() out_chunks = [] for chunk, pk_col_chunk in zip(tensor.chunks, pk.chunks): chunk_op = op.copy().reset_key() chunk_op._stage = OperandStage.map out_chunk = chunk_op.new_chunk([chunk, pk_col_chunk], index=pk_col_chunk.index) out_chunks.append(out_chunk) logger.warning(f"index chunks count: {len(out_chunks)} ") params = out.params params["chunks"] = out_chunks params["nsplits"] = ((1,) * len(out_chunks),) new_op = op.copy() return new_op.new_tileables(op.inputs, kws=[params])
def tile(cls, op): tensor = op.tensor pk = op.pk out = op.outputs[0] index_path = op.index_path ctx = get_context() # check index_path for distributed if getattr(ctx, "running_mode", None) == RunningMode.distributed: if index_path is not None: fs = get_fs(index_path, op.storage_options) if isinstance(fs, LocalFileSystem): raise ValueError( "`index_path` cannot be local file dir for distributed index building" ) # make sure all inputs have known chunk sizes check_chunks_unknown_shape(op.inputs, TilesError) nsplit = decide_unify_split(tensor.nsplits[0], pk.nsplits[0]) if op.topk is not None: nsplit = cls._get_atleast_topk_nsplit(nsplit, op.topk) if tensor.chunk_shape[1] > 1: tensor = tensor.rechunk({0: nsplit, 1: tensor.shape[1]})._inplace_tile() else: tensor = tensor.rechunk({0: nsplit})._inplace_tile() pk = pk.rechunk({0: nsplit})._inplace_tile() out_chunks = [] for chunk, pk_col_chunk in zip(tensor.chunks, pk.chunks): chunk_op = op.copy().reset_key() chunk_op._stage = OperandStage.map out_chunk = chunk_op.new_chunk([chunk, pk_col_chunk], index=pk_col_chunk.index) out_chunks.append(out_chunk) logger.warning(f"index chunks count: {len(out_chunks)} ") params = out.params params["chunks"] = out_chunks params["nsplits"] = ((1,) * len(out_chunks),) new_op = op.copy() return new_op.new_tileables(op.inputs, kws=[params])
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def _execute_map(cls, ctx, op: "ProximaBuilder"): inp = ctx[op.tensor.key] out = op.outputs[0] pks = ctx[op.pk.key] proxima_type = get_proxima_type(inp.dtype) # holder holder = proxima.IndexHolder(type=proxima_type, dimension=op.dimension) for pk, record in zip(pks, inp): pk = pk.item() if hasattr(pk, "item") else pk holder.emplace(pk, record.copy()) # converter meta = proxima.IndexMeta( proxima_type, dimension=op.dimension, measure_name=op.distance_metric ) if op.index_converter is not None: converter = proxima.IndexConverter( name=op.index_converter, meta=meta, params=op.index_converter_params ) converter.train_and_transform(holder) holder = converter.result() meta = converter.meta() # builder && dumper builder = proxima.IndexBuilder( name=op.index_builder, meta=meta, params=op.index_builder_params ) builder = builder.train_and_build(holder) path = tempfile.mkstemp(prefix="proxima-", suffix=".index")[1] dumper = proxima.IndexDumper(name="FileDumper", path=path) builder.dump(dumper) dumper.close() if op.index_path is None: ctx[out.key] = path else: # write to external file system fs = get_fs(op.index_path, op.storage_options) filename = f"proxima_{out.index[0]}_index" out_path = f"{op.index_path.rstrip('/')}/{filename}" with fs.open(out_path, "wb") as out_f: with open(path, "rb") as in_f: # 32M chunk_bytes = 32 * 1024**2 while True: data = in_f.read(chunk_bytes) if data: out_f.write(data) else: break ctx[out.key] = filename
def _execute_map(cls, ctx, op: "ProximaBuilder"): inp = ctx[op.tensor.key] out = op.outputs[0] pks = ctx[op.pk.key] proxima_type = get_proxima_type(inp.dtype) # holder holder = proxima.IndexHolder(type=proxima_type, dimension=op.dimension) for pk, record in zip(pks, inp): pk = pk.item() if hasattr(pk, "item") else pk holder.emplace(pk, record.copy()) # converter meta = proxima.IndexMeta( proxima_type, dimension=op.dimension, measure_name=op.distance_metric ) if op.index_converter is not None: converter = proxima.IndexConverter( name=op.index_converter, meta=meta, params=op.index_converter_params ) converter.train_and_transform(holder) holder = converter.result() meta = converter.meta() # builder && dumper builder = proxima.IndexBuilder( name=op.index_builder, meta=meta, params=op.index_builder_params ) builder = builder.train_and_build(holder) path = tempfile.mkstemp(prefix="proxima-", suffix=".index")[1] dumper = proxima.IndexDumper(name="FileDumper", path=path) builder.dump(dumper) dumper.close() if op.index_path is None: ctx[out.key] = path else: # write to external file system fs = get_fs(op.index_path, op.storage_options) filename = f"proxima-{out.index[0]}.index" out_path = f"{op.index_path.rstrip('/')}/{filename}" with fs.open(out_path, "wb") as out_f: with open(path, "rb") as in_f: # 32M chunk_bytes = 32 * 1024**2 while True: data = in_f.read(chunk_bytes) if data: out_f.write(data) else: break ctx[out.key] = filename
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def build_index( tensor, pk, dimension=None, index_path=None, need_shuffle=False, distance_metric="SquaredEuclidean", index_builder="SsgBuilder", index_builder_params=None, index_converter=None, index_converter_params=None, topk=None, storage_options=None, run=True, session=None, run_kwargs=None, ): tensor = validate_tensor(tensor) if tensor.dtype not in available_numpy_dtypes: raise ValueError( f"Dtype to build index should be one of {available_numpy_dtypes}, " f"got {tensor.dtype}" ) if dimension is None: dimension = tensor.shape[1] if index_builder_params is None: index_builder_params = {} if index_converter_params is None: index_converter_params = {} if need_shuffle: tensor = mt.random.permutation(tensor) if not isinstance(pk, (Base, Entity)): pk = mt.tensor(pk) op = ProximaBuilder( tensor=tensor, pk=pk, distance_metric=distance_metric, index_path=index_path, dimension=dimension, index_builder=index_builder, index_builder_params=index_builder_params, index_converter=index_converter, index_converter_params=index_converter_params, topk=topk, storage_options=storage_options, ) result = op(tensor, pk) if run: return result.execute(session=session, **(run_kwargs or dict())) else: return result
def build_index( tensor, pk, dimension=None, index_path=None, need_shuffle=False, distance_metric="SquaredEuclidean", index_builder="SsgBuilder", index_builder_params=None, index_converter=None, index_converter_params=None, topk=None, storage_options=None, run=True, session=None, run_kwargs=None, ): tensor = validate_tensor(tensor) if dimension is None: dimension = tensor.shape[1] if index_builder_params is None: index_builder_params = {} if index_converter_params is None: index_converter_params = {} if need_shuffle: tensor = mt.random.permutation(tensor) if not isinstance(pk, (Base, Entity)): pk = mt.tensor(pk) op = ProximaBuilder( tensor=tensor, pk=pk, distance_metric=distance_metric, index_path=index_path, dimension=dimension, index_builder=index_builder, index_builder_params=index_builder_params, index_converter=index_converter, index_converter_params=index_converter_params, topk=topk, storage_options=storage_options, ) result = op(tensor, pk) if run: return result.execute(session=session, **(run_kwargs or dict())) else: return result
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def tile(cls, op: "ProximaSearcher"): tensor = op.tensor index = op.index topk = op.topk outs = op.outputs # make sure all inputs have known chunk sizes check_chunks_unknown_shape(op.inputs, TilesError) if tensor.chunk_shape[1] > 1: tensor = tensor.rechunk({1: tensor.shape[1]})._inplace_tile() logger.warning(f"query chunks count: {len(tensor.chunks)} ") if hasattr(index, "op"): built_indexes = index.chunks else: # index path fs: FileSystem = get_fs(index, op.storage_options) built_indexes = [ f for f in fs.ls(index) if f.rsplit("/", 1)[-1].startswith("proxima_") ] if hasattr(index, "op"): ctx = get_context() index_chunks_workers = [ m.workers[0] if m.workers else None for m in ctx.get_chunk_metas([c.key for c in index.chunks]) ] else: index_chunks_workers = [None] * len(built_indexes) out_chunks = [], [] for tensor_chunk in tensor.chunks: pk_chunks, distance_chunks = [], [] for j, chunk_index, worker in zip( itertools.count(), built_indexes, index_chunks_workers ): chunk_op = op.copy().reset_key() chunk_op._stage = OperandStage.map if hasattr(chunk_index, "op"): chunk_op._expect_worker = worker chunk_op._index = chunk_index chunk_kws = [ { "index": (tensor_chunk.index[0], j), "dtype": outs[0].dtype, "shape": (tensor_chunk.shape[0], topk), "order": TensorOrder.C_ORDER, }, { "index": (tensor_chunk.index[0], j), "dtype": outs[1].dtype, "shape": (tensor_chunk.shape[0], topk), "order": TensorOrder.C_ORDER, }, ] chunk_inputs = [tensor_chunk] if hasattr(chunk_index, "op"): chunk_inputs.append(chunk_index) pk_chunk, distance_chunk = chunk_op.new_chunks(chunk_inputs, kws=chunk_kws) pk_chunks.append(pk_chunk) distance_chunks.append(distance_chunk) if len(pk_chunks) == 1: out_chunks[0].append(pk_chunks[0]) out_chunks[1].append(distance_chunks[0]) continue shape = (tensor_chunk.shape[0], topk * len(pk_chunks)) pk_merge_op = TensorConcatenate(axis=1) pk_merge_chunk = pk_merge_op.new_chunk( pk_chunks, index=(pk_chunks[0].index[0], 0), shape=shape, dtype=pk_chunks[0].dtype, order=pk_chunks[0].order, ) distance_merge_op = TensorConcatenate(axis=1) distance_merge_chunk = distance_merge_op.new_chunk( distance_chunks, index=(distance_chunks[0].index[0], 0), shape=shape, dtype=distance_chunks[0].dtype, order=distance_chunks[0].order, ) agg_op = ProximaSearcher( stage=OperandStage.agg, topk=op.topk, distance_metric=op.distance_metric ) agg_chunk_kws = [ { "index": pk_merge_chunk.index, "dtype": outs[0].dtype, "shape": (tensor_chunk.shape[0], topk), "order": outs[0].order, }, { "index": pk_merge_chunk.index, "dtype": outs[1].dtype, "shape": (tensor_chunk.shape[0], topk), "order": outs[1].order, }, ] pk_result_chunk, distance_result_chunk = agg_op.new_chunks( [pk_merge_chunk, distance_merge_chunk], kws=agg_chunk_kws ) out_chunks[0].append(pk_result_chunk) out_chunks[1].append(distance_result_chunk) logger.warning(f"query out_chunks count: {len(out_chunks)} ") kws = [] pk_params = outs[0].params pk_params["chunks"] = out_chunks[0] pk_params["nsplits"] = (tensor.nsplits[0], (topk,)) kws.append(pk_params) distance_params = outs[1].params distance_params["chunks"] = out_chunks[1] distance_params["nsplits"] = (tensor.nsplits[0], (topk,)) kws.append(distance_params) new_op = op.copy() return new_op.new_tileables(op.inputs, kws=kws)
def tile(cls, op: "ProximaSearcher"): tensor = op.tensor index = op.index topk = op.topk outs = op.outputs # make sure all inputs have known chunk sizes check_chunks_unknown_shape(op.inputs, TilesError) if tensor.chunk_shape[1] > 1: tensor = tensor.rechunk({1: tensor.shape[1]})._inplace_tile() logger.warning(f"query chunks count: {len(tensor.chunks)} ") if hasattr(index, "op"): built_indexes = index.chunks else: # index path fs: FileSystem = get_fs(index, op.storage_options) built_indexes = [ f for f in fs.ls(index) if f.rsplit("/", 1)[-1].startswith("proxima-") ] if hasattr(index, "op"): ctx = get_context() index_chunks_workers = [ m.workers[0] if m.workers else None for m in ctx.get_chunk_metas([c.key for c in index.chunks]) ] else: index_chunks_workers = [None] * len(built_indexes) out_chunks = [], [] for tensor_chunk in tensor.chunks: pk_chunks, distance_chunks = [], [] for j, chunk_index, worker in zip( itertools.count(), built_indexes, index_chunks_workers ): chunk_op = op.copy().reset_key() chunk_op._stage = OperandStage.map if hasattr(chunk_index, "op"): chunk_op._expect_worker = worker chunk_op._index = chunk_index chunk_kws = [ { "index": (tensor_chunk.index[0], j), "dtype": outs[0].dtype, "shape": (tensor_chunk.shape[0], topk), "order": TensorOrder.C_ORDER, }, { "index": (tensor_chunk.index[0], j), "dtype": outs[1].dtype, "shape": (tensor_chunk.shape[0], topk), "order": TensorOrder.C_ORDER, }, ] chunk_inputs = [tensor_chunk] if hasattr(chunk_index, "op"): chunk_inputs.append(chunk_index) pk_chunk, distance_chunk = chunk_op.new_chunks(chunk_inputs, kws=chunk_kws) pk_chunks.append(pk_chunk) distance_chunks.append(distance_chunk) if len(pk_chunks) == 1: out_chunks[0].append(pk_chunks[0]) out_chunks[1].append(distance_chunks[0]) continue shape = (tensor_chunk.shape[0], topk * len(pk_chunks)) pk_merge_op = TensorConcatenate(axis=1) pk_merge_chunk = pk_merge_op.new_chunk( pk_chunks, index=(pk_chunks[0].index[0], 0), shape=shape, dtype=pk_chunks[0].dtype, order=pk_chunks[0].order, ) distance_merge_op = TensorConcatenate(axis=1) distance_merge_chunk = distance_merge_op.new_chunk( distance_chunks, index=(distance_chunks[0].index[0], 0), shape=shape, dtype=distance_chunks[0].dtype, order=distance_chunks[0].order, ) agg_op = ProximaSearcher( stage=OperandStage.agg, topk=op.topk, distance_metric=op.distance_metric ) agg_chunk_kws = [ { "index": pk_merge_chunk.index, "dtype": outs[0].dtype, "shape": (tensor_chunk.shape[0], topk), "order": outs[0].order, }, { "index": pk_merge_chunk.index, "dtype": outs[1].dtype, "shape": (tensor_chunk.shape[0], topk), "order": outs[1].order, }, ] pk_result_chunk, distance_result_chunk = agg_op.new_chunks( [pk_merge_chunk, distance_merge_chunk], kws=agg_chunk_kws ) out_chunks[0].append(pk_result_chunk) out_chunks[1].append(distance_result_chunk) logger.warning(f"query out_chunks count: {len(out_chunks)} ") kws = [] pk_params = outs[0].params pk_params["chunks"] = out_chunks[0] pk_params["nsplits"] = (tensor.nsplits[0], (topk,)) kws.append(pk_params) distance_params = outs[1].params distance_params["chunks"] = out_chunks[1] distance_params["nsplits"] = (tensor.nsplits[0], (topk,)) kws.append(distance_params) new_op = op.copy() return new_op.new_tileables(op.inputs, kws=kws)
https://github.com/mars-project/mars/issues/1654
Traceback (most recent call last): File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 382, in execute_graph self._execute_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 410, in _execute_graph self.prepare_graph(compose=compose) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 648, in prepare_graph self._target_tileable_datas + fetch_tileables, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 348, in build tileables, tileable_graph=tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 262, in build self._on_tile_failure(tileable_data.op, exc_info) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 301, in inner raise exc_info[1].with_traceback(exc_info[2]) from None File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 242, in build tiled = self._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 337, in _tile return super()._tile(tileable_data, tileable_graph) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 203, in _tile tds = on_tile(tileable_data.op.outputs, tds) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/scheduler/graph.py", line 630, in on_tile return self.context.wraps(handler.dispatch)(first.op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/context.py", line 72, in h return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tiles.py", line 119, in dispatch tiled = op_cls.tile(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 182, in tile return cls._tile_dataframe(op) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in _tile_dataframe inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/merge/concat.py", line 94, in <listcomp> inputs = [item.rechunk(normalized_nsplits)._inplace_tile() for item in inputs] File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/dataframe/base/rechunk.py", line 96, in rechunk chunk_size = get_nsplits(a, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/rechunk/core.py", line 38, in get_nsplits return decide_chunk_sizes(tileable.shape, chunk_size, itemsize) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 551, in decide_chunk_sizes return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape)))) File "/home/admin/work/tip_dev-pymars-0.6.0a3.zip/mars/tensor/utils.py", line 66, in normalize_chunk_sizes raise ValueError('chunks shape should be of the same length, ' ValueError: chunks shape should be of the same length, got shape: 100000, chunks: (nan, nan, nan, nan, nan, nan)
ValueError
def execute_sort_values(data, op, inplace=None, by=None): if inplace is None: inplace = op.inplace # ignore_index is new in Pandas version 1.0.0. ignore_index = getattr(op, "ignore_index", False) if isinstance(data, (pd.DataFrame, pd.Series)): kwargs = dict( axis=op.axis, ascending=op.ascending, ignore_index=ignore_index, na_position=op.na_position, kind=op.kind, ) if isinstance(data, pd.DataFrame): kwargs["by"] = by if by is not None else op.by if inplace: kwargs["inplace"] = True try: data.sort_values(**kwargs) except TypeError: # pragma: no cover kwargs.pop("ignore_index", None) data.sort_values(**kwargs) return data else: try: return data.sort_values(**kwargs) except TypeError: # pragma: no cover kwargs.pop("ignore_index", None) return data.sort_values(**kwargs) else: # pragma: no cover # cudf doesn't support axis and kind if isinstance(data, cudf.DataFrame): return data.sort_values( op.by, ascending=op.ascending, na_position=op.na_position ) else: return data.sort_values(ascending=op.ascending, na_position=op.na_position)
def execute_sort_values(data, op, inplace=None): if inplace is None: inplace = op.inplace # ignore_index is new in Pandas version 1.0.0. ignore_index = getattr(op, "ignore_index", False) if isinstance(data, (pd.DataFrame, pd.Series)): kwargs = dict( axis=op.axis, ascending=op.ascending, ignore_index=ignore_index, na_position=op.na_position, kind=op.kind, ) if isinstance(data, pd.DataFrame): kwargs["by"] = op.by if inplace: kwargs["inplace"] = True try: data.sort_values(**kwargs) except TypeError: # pragma: no cover kwargs.pop("ignore_index", None) data.sort_values(**kwargs) return data else: try: return data.sort_values(**kwargs) except TypeError: # pragma: no cover kwargs.pop("ignore_index", None) return data.sort_values(**kwargs) else: # pragma: no cover # cudf doesn't support axis and kind if isinstance(data, cudf.DataFrame): return data.sort_values( op.by, ascending=op.ascending, na_position=op.na_position ) else: return data.sort_values(ascending=op.ascending, na_position=op.na_position)
https://github.com/mars-project/mars/issues/1641
2020-10-19 19:46:44,463 Unexpected exception occurred in BaseCalcActor._calc_results. graph_key=cfd4b1a2cc914a2b30aa228eda1e7ea8 Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/worker/calc.py", line 201, in _calc_results self._execution_pool.submit(executor.execute_graph, graph, File "src/gevent/event.py", line 383, in gevent._gevent_cevent.AsyncResult.result File "src/gevent/event.py", line 305, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 335, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 323, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 303, in gevent._gevent_cevent.AsyncResult._raise_exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/_compat.py", line 65, in reraise raise value.with_traceback(tb) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/threadpool.py", line 142, in __run_task thread_result.set(func(*args, **kwargs)) File "mars/actors/pool/gevent_pool.pyx", line 127, in mars.actors.pool.gevent_pool.GeventThreadPool._wrap_watch.inner File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun.swj/Code/mars/mars/dataframe/sort/psrs.py", line 357, in execute ctx[op.outputs[-1].key] = res[by].iloc[slc] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/frame.py", line 2908, in __getitem__ indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1254, in _get_listlike_indexer self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1304, in _validate_read_indexer raise KeyError(f"{not_found} not in index") KeyError: "['__PSRS_TMP_DISTINCT_COL'] not in index"
KeyError
def execute(cls, ctx, op): a = ctx[op.inputs[0].key] if op.sort_type == "sort_values": ctx[op.outputs[0].key] = res = execute_sort_values(a, op) else: ctx[op.outputs[0].key] = res = execute_sort_index(a, op) by = op.by add_distinct_col = ( bool(int(os.environ.get("PSRS_DISTINCT_COL", "0"))) or getattr(ctx, "running_mode", None) == RunningMode.distributed ) if ( add_distinct_col and isinstance(a, pd.DataFrame) and op.sort_type == "sort_values" ): # when running under distributed mode, we introduce an extra column # to make sure pivots are distinct chunk_idx = op.inputs[0].index[0] distinct_col = ( _PSRS_DISTINCT_COL if a.columns.nlevels == 1 else (_PSRS_DISTINCT_COL,) + ("",) * (a.columns.nlevels - 1) ) res[distinct_col] = np.arange( chunk_idx << 32, (chunk_idx << 32) + len(a), dtype=np.int64 ) by = list(by) + [distinct_col] n = op.n_partition if a.shape[op.axis] < n: num = n // a.shape[op.axis] + 1 res = execute_sort_values(pd.concat([res] * num), op, by=by) w = int(res.shape[op.axis] // n) slc = (slice(None),) * op.axis + (slice(0, n * w, w),) if op.sort_type == "sort_values": # do regular sample if op.by is not None: ctx[op.outputs[-1].key] = res[by].iloc[slc] else: ctx[op.outputs[-1].key] = res.iloc[slc] else: # do regular sample ctx[op.outputs[-1].key] = res.iloc[slc]
def execute(cls, ctx, op): a = ctx[op.inputs[0].key] if op.sort_type == "sort_values": ctx[op.outputs[0].key] = res = execute_sort_values(a, op) else: ctx[op.outputs[0].key] = res = execute_sort_index(a, op) by = op.by if ( getattr(ctx, "running_mode", None) == RunningMode.distributed and isinstance(a, pd.DataFrame) and op.sort_type == "sort_values" ): # when running under distributed mode, we introduce an extra column # to make sure pivots are distinct chunk_idx = op.inputs[0].index[0] distinct_col = ( _PSRS_DISTINCT_COL if a.columns.nlevels == 1 else (_PSRS_DISTINCT_COL,) + ("",) * (a.columns.nlevels - 1) ) res[distinct_col] = np.arange(chunk_idx << 32, (chunk_idx << 32) + len(a)) by = list(by) + [distinct_col] n = op.n_partition if a.shape[op.axis] < n: num = n // a.shape[op.axis] + 1 res = execute_sort_values(pd.concat([a] * num), op) w = int(res.shape[op.axis] // n) slc = (slice(None),) * op.axis + (slice(0, n * w, w),) if op.sort_type == "sort_values": # do regular sample if op.by is not None: ctx[op.outputs[-1].key] = res[by].iloc[slc] else: ctx[op.outputs[-1].key] = res.iloc[slc] else: # do regular sample ctx[op.outputs[-1].key] = res.iloc[slc]
https://github.com/mars-project/mars/issues/1641
2020-10-19 19:46:44,463 Unexpected exception occurred in BaseCalcActor._calc_results. graph_key=cfd4b1a2cc914a2b30aa228eda1e7ea8 Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/worker/calc.py", line 201, in _calc_results self._execution_pool.submit(executor.execute_graph, graph, File "src/gevent/event.py", line 383, in gevent._gevent_cevent.AsyncResult.result File "src/gevent/event.py", line 305, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 335, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 323, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 303, in gevent._gevent_cevent.AsyncResult._raise_exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/_compat.py", line 65, in reraise raise value.with_traceback(tb) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/threadpool.py", line 142, in __run_task thread_result.set(func(*args, **kwargs)) File "mars/actors/pool/gevent_pool.pyx", line 127, in mars.actors.pool.gevent_pool.GeventThreadPool._wrap_watch.inner File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun.swj/Code/mars/mars/dataframe/sort/psrs.py", line 357, in execute ctx[op.outputs[-1].key] = res[by].iloc[slc] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/frame.py", line 2908, in __getitem__ indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1254, in _get_listlike_indexer self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1304, in _validate_read_indexer raise KeyError(f"{not_found} not in index") KeyError: "['__PSRS_TMP_DISTINCT_COL'] not in index"
KeyError
def _execute_dataframe_map(cls, ctx, op): a, pivots = [ctx[c.key] for c in op.inputs] out = op.outputs[0] if isinstance(a, pd.DataFrame): # use numpy.searchsorted to find split positions. by = op.by distinct_col = ( _PSRS_DISTINCT_COL if a.columns.nlevels == 1 else (_PSRS_DISTINCT_COL,) + ("",) * (a.columns.nlevels - 1) ) if distinct_col in a.columns: by += [distinct_col] records = a[by].to_records(index=False) p_records = pivots.to_records(index=False) if op.ascending: poses = records.searchsorted(p_records, side="right") else: poses = len(records) - records[::-1].searchsorted(p_records, side="right") del records, p_records poses = (None,) + tuple(poses) + (None,) for i in range(op.n_partition): values = a.iloc[poses[i] : poses[i + 1]] ctx[(out.key, str(i))] = values else: # pragma: no cover # for cudf, find split positions in loops. if op.ascending: pivots.append(a.iloc[-1][op.by]) for i in range(op.n_partition): selected = a for label in op.by: selected = selected.loc[a[label] <= pivots.iloc[i][label]] ctx[(out.key, str(i))] = selected else: pivots.append(a.iloc[-1][op.by]) for i in range(op.n_partition): selected = a for label in op.by: selected = selected.loc[a[label] >= pivots.iloc[i][label]] ctx[(out.key, str(i))] = selected
def _execute_dataframe_map(cls, ctx, op): a, pivots = [ctx[c.key] for c in op.inputs] out = op.outputs[0] if isinstance(a, pd.DataFrame): # use numpy.searchsorted to find split positions. by = op.by distinct_col = ( _PSRS_DISTINCT_COL if a.columns.nlevels == 1 else (_PSRS_DISTINCT_COL,) + ("",) * (a.columns.nlevels - 1) ) if _PSRS_DISTINCT_COL in a.columns: by += [distinct_col] records = a[by].to_records(index=False) p_records = pivots.to_records(index=False) if op.ascending: poses = records.searchsorted(p_records, side="right") else: poses = len(records) - records[::-1].searchsorted(p_records, side="right") del records, p_records poses = (None,) + tuple(poses) + (None,) for i in range(op.n_partition): values = a.iloc[poses[i] : poses[i + 1]] ctx[(out.key, str(i))] = values else: # pragma: no cover # for cudf, find split positions in loops. if op.ascending: pivots.append(a.iloc[-1][op.by]) for i in range(op.n_partition): selected = a for label in op.by: selected = selected.loc[a[label] <= pivots.iloc[i][label]] ctx[(out.key, str(i))] = selected else: pivots.append(a.iloc[-1][op.by]) for i in range(op.n_partition): selected = a for label in op.by: selected = selected.loc[a[label] >= pivots.iloc[i][label]] ctx[(out.key, str(i))] = selected
https://github.com/mars-project/mars/issues/1641
2020-10-19 19:46:44,463 Unexpected exception occurred in BaseCalcActor._calc_results. graph_key=cfd4b1a2cc914a2b30aa228eda1e7ea8 Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/worker/calc.py", line 201, in _calc_results self._execution_pool.submit(executor.execute_graph, graph, File "src/gevent/event.py", line 383, in gevent._gevent_cevent.AsyncResult.result File "src/gevent/event.py", line 305, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 335, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 323, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 303, in gevent._gevent_cevent.AsyncResult._raise_exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/_compat.py", line 65, in reraise raise value.with_traceback(tb) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/threadpool.py", line 142, in __run_task thread_result.set(func(*args, **kwargs)) File "mars/actors/pool/gevent_pool.pyx", line 127, in mars.actors.pool.gevent_pool.GeventThreadPool._wrap_watch.inner File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun.swj/Code/mars/mars/dataframe/sort/psrs.py", line 357, in execute ctx[op.outputs[-1].key] = res[by].iloc[slc] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/frame.py", line 2908, in __getitem__ indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1254, in _get_listlike_indexer self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1304, in _validate_read_indexer raise KeyError(f"{not_found} not in index") KeyError: "['__PSRS_TMP_DISTINCT_COL'] not in index"
KeyError
def loads(buf): mv = memoryview(buf) header = read_file_header(mv) compress = header.compress if compress == CompressType.NONE: data = buf[HEADER_LENGTH:] else: data = decompressors[compress](mv[HEADER_LENGTH:]) if header.type == SerialType.ARROW: try: return deserialize(memoryview(data)) except pyarrow.lib.ArrowInvalid: # pragma: no cover # reconstruct value from buffers of arrow components data_view = memoryview(data) meta_block_size = np.frombuffer(data_view[0:4], dtype="int32").item() meta = pickle.loads(data_view[4 : 4 + meta_block_size]) # nosec buffer_sizes = meta.pop("buffer_sizes") bounds = np.cumsum([4 + meta_block_size] + buffer_sizes) meta["data"] = [ pyarrow.py_buffer(data_view[bounds[idx] : bounds[idx + 1]]) for idx in range(len(buffer_sizes)) ] return pyarrow.deserialize_components(meta, mars_serialize_context()) else: return pickle.loads(data)
def loads(buf): mv = memoryview(buf) header = read_file_header(mv) compress = header.compress if compress == CompressType.NONE: data = buf[HEADER_LENGTH:] else: data = decompressors[compress](mv[HEADER_LENGTH:]) if header.type == SerialType.ARROW: try: return pyarrow.deserialize(memoryview(data), mars_serialize_context()) except pyarrow.lib.ArrowInvalid: # pragma: no cover # reconstruct value from buffers of arrow components data_view = memoryview(data) meta_block_size = np.frombuffer(data_view[0:4], dtype="int32").item() meta = pickle.loads(data_view[4 : 4 + meta_block_size]) # nosec buffer_sizes = meta.pop("buffer_sizes") bounds = np.cumsum([4 + meta_block_size] + buffer_sizes) meta["data"] = [ pyarrow.py_buffer(data_view[bounds[idx] : bounds[idx + 1]]) for idx in range(len(buffer_sizes)) ] return pyarrow.deserialize_components(meta, mars_serialize_context()) else: return pickle.loads(data)
https://github.com/mars-project/mars/issues/1641
2020-10-19 19:46:44,463 Unexpected exception occurred in BaseCalcActor._calc_results. graph_key=cfd4b1a2cc914a2b30aa228eda1e7ea8 Traceback (most recent call last): File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/worker/calc.py", line 201, in _calc_results self._execution_pool.submit(executor.execute_graph, graph, File "src/gevent/event.py", line 383, in gevent._gevent_cevent.AsyncResult.result File "src/gevent/event.py", line 305, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 335, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 323, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 303, in gevent._gevent_cevent.AsyncResult._raise_exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/_compat.py", line 65, in reraise raise value.with_traceback(tb) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/gevent/threadpool.py", line 142, in __run_task thread_result.set(func(*args, **kwargs)) File "mars/actors/pool/gevent_pool.pyx", line 127, in mars.actors.pool.gevent_pool.GeventThreadPool._wrap_watch.inner File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun.swj/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun.swj/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun.swj/Code/mars/mars/dataframe/sort/psrs.py", line 357, in execute ctx[op.outputs[-1].key] = res[by].iloc[slc] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/frame.py", line 2908, in __getitem__ indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1] File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1254, in _get_listlike_indexer self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing) File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1304, in _validate_read_indexer raise KeyError(f"{not_found} not in index") KeyError: "['__PSRS_TMP_DISTINCT_COL'] not in index"
KeyError
def _execute_map(cls, ctx, op): (data,), device_id, xp = as_same_device( [ctx[op.inputs[0].key]], device=op.device, ret_extra=True ) index = ctx[op.inputs[1].key] if len(op.inputs) == 2 else None with device(device_id): data = xp.ascontiguousarray(data) if index is not None: # fetch the trained index trained_index = _load_index(ctx, op, index, device_id) return_index_type = _get_index_type(op.return_index_type, ctx) if return_index_type == "object": # clone a new one, # because faiss does not ensure thread-safe for operations that change index # https://github.com/facebookresearch/faiss/wiki/Threads-and-asynchronous-calls#thread-safety trained_index = faiss.clone_index(trained_index) else: trained_index = faiss.index_factory( data.shape[1], op.faiss_index, op.faiss_metric_type ) if op.same_distribution: # no need to train, just create index pass else: # distribution no the same, train on each chunk trained_index.train(data) if device_id >= 0: # pragma: no cover trained_index = _index_to_gpu(trained_index, device_id) if op.metric == "cosine": # faiss does not support cosine distances directly, # data needs to be normalize before adding to index, # refer to: # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance faiss.normalize_L2(data) # add data into index if device_id >= 0: # pragma: no cover # gpu trained_index.add_c(data.shape[0], _swig_ptr_from_cupy_float32_array(data)) else: trained_index.add(data) ctx[op.outputs[0].key] = _store_index(ctx, op, trained_index, device_id)
def _execute_map(cls, ctx, op): (data,), device_id, _ = as_same_device( [ctx[op.inputs[0].key]], device=op.device, ret_extra=True ) index = ctx[op.inputs[1].key] if len(op.inputs) == 2 else None with device(device_id): if index is not None: # fetch the trained index trained_index = _load_index(ctx, op, index, device_id) return_index_type = _get_index_type(op.return_index_type, ctx) if return_index_type == "object": # clone a new one, # because faiss does not ensure thread-safe for operations that change index # https://github.com/facebookresearch/faiss/wiki/Threads-and-asynchronous-calls#thread-safety trained_index = faiss.clone_index(trained_index) else: trained_index = faiss.index_factory( data.shape[1], op.faiss_index, op.faiss_metric_type ) if op.same_distribution: # no need to train, just create index pass else: # distribution no the same, train on each chunk trained_index.train(data) if device_id >= 0: # pragma: no cover trained_index = _index_to_gpu(trained_index, device_id) if op.metric == "cosine": # faiss does not support cosine distances directly, # data needs to be normalize before adding to index, # refer to: # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance faiss.normalize_L2(data) # add data into index if device_id >= 0: # pragma: no cover # gpu trained_index.add_c(data.shape[0], _swig_ptr_from_cupy_float32_array(data)) else: trained_index.add(data) ctx[op.outputs[0].key] = _store_index(ctx, op, trained_index, device_id)
https://github.com/mars-project/mars/issues/1629
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 648 try: --> 649 self._set_tuple(value, field_obj, tp=field.type, weak_ref=field.weak_ref) 650 except TypeError: ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple() 364 it_obj = Value() --> 365 self._set_value(val, it_obj, tp=tp.type if tp is not None else tp) 366 res.append(it_obj) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 443 value_field = PRIMITIVE_TYPE_TO_VALUE_FIELD[tp] --> 444 setattr(obj, value_field, value) 445 elif tp is ValueType.slice: TypeError: 10.0 has type float, but expected one of: int, long The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~/Workspace/mars/mars/graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Provider.serialize_model() 797 for name, field in model_instance._FIELDS.items(): --> 798 field.serialize(self, model_instance, obj) 799 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Provider.serialize_model() 797 for name, field in model_instance._FIELDS.items(): --> 798 field.serialize(self, model_instance, obj) 799 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 651 exc_info = sys.exc_info() --> 652 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 653 f'value {value}, reason: {exc_info[1]}') \ ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 648 try: --> 649 self._set_tuple(value, field_obj, tp=field.type, weak_ref=field.weak_ref) 650 except TypeError: ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple() 364 it_obj = Value() --> 365 self._set_value(val, it_obj, tp=tp.type if tp is not None else tp) 366 res.append(it_obj) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 443 value_field = PRIMITIVE_TYPE_TO_VALUE_FIELD[tp] --> 444 setattr(obj, value_field, value) 445 elif tp is ValueType.slice: TypeError: Failed to set field `shape` for Chunk <op=TensorTranspose, key=6fd87469bf886695950bdfb3164a7ce7> with value (10.0, -1), reason: 10.0 has type float, but expected one of: int, long
TypeError
def __call__(self, a): shape = tuple(s if np.isnan(s) else int(s) for s in _reorder(a.shape, self._axes)) if self._axes == list(reversed(range(a.ndim))): # order reversed tensor_order = reverse_order(a.order) else: tensor_order = TensorOrder.C_ORDER return self.new_tensor([a], shape, order=tensor_order)
def __call__(self, a): shape = _reorder(a.shape, self._axes) if self._axes == list(reversed(range(a.ndim))): # order reversed tensor_order = reverse_order(a.order) else: tensor_order = TensorOrder.C_ORDER return self.new_tensor([a], shape, order=tensor_order)
https://github.com/mars-project/mars/issues/1629
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 648 try: --> 649 self._set_tuple(value, field_obj, tp=field.type, weak_ref=field.weak_ref) 650 except TypeError: ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple() 364 it_obj = Value() --> 365 self._set_value(val, it_obj, tp=tp.type if tp is not None else tp) 366 res.append(it_obj) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 443 value_field = PRIMITIVE_TYPE_TO_VALUE_FIELD[tp] --> 444 setattr(obj, value_field, value) 445 elif tp is ValueType.slice: TypeError: 10.0 has type float, but expected one of: int, long The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~/Workspace/mars/mars/graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Provider.serialize_model() 797 for name, field in model_instance._FIELDS.items(): --> 798 field.serialize(self, model_instance, obj) 799 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Provider.serialize_model() 797 for name, field in model_instance._FIELDS.items(): --> 798 field.serialize(self, model_instance, obj) 799 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 651 exc_info = sys.exc_info() --> 652 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 653 f'value {value}, reason: {exc_info[1]}') \ ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 648 try: --> 649 self._set_tuple(value, field_obj, tp=field.type, weak_ref=field.weak_ref) 650 except TypeError: ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple() 364 it_obj = Value() --> 365 self._set_value(val, it_obj, tp=tp.type if tp is not None else tp) 366 res.append(it_obj) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 443 value_field = PRIMITIVE_TYPE_TO_VALUE_FIELD[tp] --> 444 setattr(obj, value_field, value) 445 elif tp is ValueType.slice: TypeError: Failed to set field `shape` for Chunk <op=TensorTranspose, key=6fd87469bf886695950bdfb3164a7ce7> with value (10.0, -1), reason: 10.0 has type float, but expected one of: int, long
TypeError
def tile(cls, op): tensor = op.outputs[0] out_chunks = [] for c in op.inputs[0].chunks: chunk_op = op.copy().reset_key() chunk_shape = tuple( s if np.isnan(s) else int(s) for s in _reorder(c.shape, op.axes) ) chunk_idx = _reorder(c.index, op.axes) out_chunk = chunk_op.new_chunk( [c], shape=chunk_shape, index=chunk_idx, order=tensor.order ) out_chunks.append(out_chunk) new_op = op.copy() nsplits = _reorder(op.inputs[0].nsplits, op.axes) return new_op.new_tensors( op.inputs, op.outputs[0].shape, order=tensor.order, chunks=out_chunks, nsplits=nsplits, )
def tile(cls, op): tensor = op.outputs[0] out_chunks = [] for c in op.inputs[0].chunks: chunk_op = op.copy().reset_key() chunk_shape = _reorder(c.shape, op.axes) chunk_idx = _reorder(c.index, op.axes) out_chunk = chunk_op.new_chunk( [c], shape=chunk_shape, index=chunk_idx, order=tensor.order ) out_chunks.append(out_chunk) new_op = op.copy() nsplits = _reorder(op.inputs[0].nsplits, op.axes) return new_op.new_tensors( op.inputs, op.outputs[0].shape, order=tensor.order, chunks=out_chunks, nsplits=nsplits, )
https://github.com/mars-project/mars/issues/1629
--------------------------------------------------------------------------- TypeError Traceback (most recent call last) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 648 try: --> 649 self._set_tuple(value, field_obj, tp=field.type, weak_ref=field.weak_ref) 650 except TypeError: ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple() 364 it_obj = Value() --> 365 self._set_value(val, it_obj, tp=tp.type if tp is not None else tp) 366 res.append(it_obj) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 443 value_field = PRIMITIVE_TYPE_TO_VALUE_FIELD[tp] --> 444 setattr(obj, value_field, value) 445 elif tp is ValueType.slice: TypeError: 10.0 has type float, but expected one of: int, long The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in _execute_graph() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in create_operand_actors() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in _wrapped() /home/admin/work/public-mars-0.5.1.zip/mars/scheduler/graph.py in get_executable_operand_dag() /home/admin/work/public-mars-0.5.1.zip/mars/utils.py in serialize_graph() ~/Workspace/mars/mars/graph.pyx in mars.graph.DirectedGraph.to_pb() 420 return graph --> 421 422 def to_pb(self, pb_obj=None, data_serial_type=None, pickle_protocol=None): ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.to_pb() 686 pickle_protocol=pickle_protocol) --> 687 return self.serialize(provider, obj=obj) 688 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Provider.serialize_model() 797 for name, field in model_instance._FIELDS.items(): --> 798 field.serialize(self, model_instance, obj) 799 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 630 if val is not None: --> 631 self._serial_reference_value(tag, field.type.type.model, val, it_obj) 632 elif isinstance(it_obj, Value): ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._serial_reference_value() 572 field_obj = value.cls(self)() --> 573 value.serialize(self, obj=field_obj) 574 value_pb.type_id = value.__serializable_index__ ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Serializable.serialize() 669 def serialize(self, Provider provider, obj=None): --> 670 return provider.serialize_model(self, obj=obj) 671 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Provider.serialize_model() 797 for name, field in model_instance._FIELDS.items(): --> 798 field.serialize(self, model_instance, obj) 799 ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 154 --> 155 cpdef serialize(self, Provider provider, model_instance, obj): 156 return provider.serialize_field(self, model_instance, obj) ~/Workspace/mars/mars/serialize/core.pyx in mars.serialize.core.Field.serialize() 155 cpdef serialize(self, Provider provider, model_instance, obj): --> 156 return provider.serialize_field(self, model_instance, obj) 157 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 651 exc_info = sys.exc_info() --> 652 raise TypeError(f'Failed to set field `{tag}` for {model_instance} with ' 653 f'value {value}, reason: {exc_info[1]}') \ ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field() 648 try: --> 649 self._set_tuple(value, field_obj, tp=field.type, weak_ref=field.weak_ref) 650 except TypeError: ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple() 364 it_obj = Value() --> 365 self._set_value(val, it_obj, tp=tp.type if tp is not None else tp) 366 res.append(it_obj) ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value() 559 else: --> 560 cls._set_typed_value(value, obj, tp, weak_ref=weak_ref) 561 ~/Workspace/mars/mars/serialize/pbserializer.pyx in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value() 443 value_field = PRIMITIVE_TYPE_TO_VALUE_FIELD[tp] --> 444 setattr(obj, value_field, value) 445 elif tp is ValueType.slice: TypeError: Failed to set field `shape` for Chunk <op=TensorTranspose, key=6fd87469bf886695950bdfb3164a7ce7> with value (10.0, -1), reason: 10.0 has type float, but expected one of: int, long
TypeError
def _pandas_read_csv(cls, f, op): csv_kwargs = op.extra_params.copy() out_df = op.outputs[0] start, end = _find_chunk_start_end(f, op.offset, op.size) f.seek(start) b = FixedSizeFileObject(f, end - start) if hasattr(out_df, "dtypes"): dtypes = out_df.dtypes else: # Output will be a Series in some optimize rules. dtypes = pd.Series([out_df.dtype], index=[out_df.name]) if end == start: # the last chunk may be empty df = build_empty_df(dtypes) if op.keep_usecols_order and not isinstance(op.usecols, list): # convert to Series, if usecols is a scalar df = df[op.usecols] else: if start == 0: # The first chunk contains header # As we specify names and dtype, we need to skip header rows csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header if op.usecols: usecols = op.usecols if isinstance(op.usecols, list) else [op.usecols] else: usecols = op.usecols if contain_arrow_dtype(dtypes): # when keep_default_na is True which is default, # will replace null value with np.nan, # which will cause failure when converting to arrow string array csv_kwargs["keep_default_na"] = False csv_kwargs["dtype"] = cls._select_arrow_dtype(dtypes) df = pd.read_csv( b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, nrows=op.nrows, **csv_kwargs, ) if op.keep_usecols_order: df = df[op.usecols] return df
def _pandas_read_csv(cls, f, op): csv_kwargs = op.extra_params.copy() out_df = op.outputs[0] start, end = _find_chunk_start_end(f, op.offset, op.size) f.seek(start) b = FixedSizeFileObject(f, end - start) if hasattr(out_df, "dtypes"): dtypes = out_df.dtypes else: # Output will be a Series in some optimize rules. dtypes = pd.Series([out_df.dtype], index=[out_df.name]) if end == start: # the last chunk may be empty df = build_empty_df(dtypes) if op.keep_usecols_order and not isinstance(op.usecols, list): # convert to Series, if usecols is a scalar df = df[op.usecols] else: if start == 0: # The first chunk contains header # As we specify names and dtype, we need to skip header rows csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header if op.usecols: usecols = op.usecols if isinstance(op.usecols, list) else [op.usecols] else: usecols = op.usecols if contain_arrow_dtype(dtypes): # when keep_default_na is True which is default, # will replace null value with np.nan, # which will cause failure when converting to arrow string array csv_kwargs["keep_default_na"] = False df = pd.read_csv( b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs, ) if op.keep_usecols_order: df = df[op.usecols] return df
https://github.com/mars-project/mars/issues/1604
In [9]: df = pd.DataFrame({ ...: 'col1': np.random.randint(0, 100, (100000,)), ...: 'col2': np.random.choice(['a', 'b', 'c'], (100000,)), ...: 'col3': np.arange(100000) ...: }) ...: df.iloc[-100:, :] = pd.NA In [10]: df.to_csv('test.csv', index=False) In [11]: md.read_csv('test.csv').execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-560f5a720bd9> in <module> ----> 1 md.read_csv('test.csv').execute() ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~/Documents/mars_dev/mars/mars/core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Documents/mars_dev/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 105 # set number of running cores 106 self.context.set_ncores(kw['n_parallel']) --> 107 res = self._executor.execute_tileables(tileables, **kw) 108 return res 109 ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 876 n_parallel=n_parallel or n_thread, 877 print_progress=print_progress, mock=mock, --> 878 chunk_result=chunk_result) 879 880 # update shape of tileable and its chunks whatever it's successful or not ~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 688 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 689 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 690 res = graph_execution.execute(retval) 691 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 692 if mock: ~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval) 569 # wait until all the futures completed 570 for future in executed_futures: --> 571 future.result() 572 573 if retval: ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 430 raise CancelledError() 431 elif self._state == FINISHED: --> 432 return self.__get_result() 433 else: 434 raise TimeoutError() ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op) 441 # so we pass the first operand's first output to Executor.handle 442 first_op = ops[0] --> 443 Executor.handle(first_op, results, self._mock) 444 445 # update maximal memory usage during execution ~/Documents/mars_dev/mars/mars/executor.py in handle(cls, op, results, mock) 639 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 640 try: --> 641 return runner(results, op) 642 except UFuncTypeError as e: 643 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op) 321 df = df[op.usecols] 322 else: --> 323 df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) 324 325 ctx[out_df.key] = df ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in _pandas_read_csv(cls, f, op) 272 csv_kwargs['keep_default_na'] = False 273 df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, --> 274 dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) 275 if op.keep_usecols_order: 276 df = df[op.usecols] ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 674 ) 675 --> 676 return _read(filepath_or_buffer, kwds) 677 678 parser_f.__name__ = name ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 452 453 try: --> 454 data = parser.read(nrows) 455 finally: 456 parser.close() ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 1131 def read(self, nrows=None): 1132 nrows = _validate_integer("nrows", nrows) -> 1133 ret = self._engine.read(nrows) 1134 1135 # May alter columns / col_dict ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 2035 def read(self, nrows=None): 2036 try: -> 2037 data = self._reader.read(nrows) 2038 except StopIteration: 2039 if self._first_chunk: pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_column_data() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_tokens() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_with_dtype() ValueError: Integer column has NA values in column 0
ValueError
def _cudf_read_csv(cls, op): # pragma: no cover if op.usecols: usecols = op.usecols if isinstance(op.usecols, list) else [op.usecols] else: usecols = op.usecols csv_kwargs = op.extra_params if op.offset == 0: df = cudf.read_csv( op.path, byte_range=(op.offset, op.size), sep=op.sep, usecols=usecols, **csv_kwargs, ) else: df = cudf.read_csv( op.path, byte_range=(op.offset, op.size), sep=op.sep, names=op.names, usecols=usecols, nrows=op.nrows, **csv_kwargs, ) if op.keep_usecols_order: df = df[op.usecols] return df
def _cudf_read_csv(cls, op): # pragma: no cover if op.usecols: usecols = op.usecols if isinstance(op.usecols, list) else [op.usecols] else: usecols = op.usecols csv_kwargs = op.extra_params if op.offset == 0: df = cudf.read_csv( op.path, byte_range=(op.offset, op.size), sep=op.sep, usecols=usecols, **csv_kwargs, ) else: df = cudf.read_csv( op.path, byte_range=(op.offset, op.size), sep=op.sep, names=op.names, usecols=usecols, dtype=cls._validate_dtypes(op.outputs[0].dtypes, op.gpu), nrows=op.nrows, **csv_kwargs, ) if op.keep_usecols_order: df = df[op.usecols] return df
https://github.com/mars-project/mars/issues/1604
In [9]: df = pd.DataFrame({ ...: 'col1': np.random.randint(0, 100, (100000,)), ...: 'col2': np.random.choice(['a', 'b', 'c'], (100000,)), ...: 'col3': np.arange(100000) ...: }) ...: df.iloc[-100:, :] = pd.NA In [10]: df.to_csv('test.csv', index=False) In [11]: md.read_csv('test.csv').execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-560f5a720bd9> in <module> ----> 1 md.read_csv('test.csv').execute() ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~/Documents/mars_dev/mars/mars/core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Documents/mars_dev/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 105 # set number of running cores 106 self.context.set_ncores(kw['n_parallel']) --> 107 res = self._executor.execute_tileables(tileables, **kw) 108 return res 109 ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 876 n_parallel=n_parallel or n_thread, 877 print_progress=print_progress, mock=mock, --> 878 chunk_result=chunk_result) 879 880 # update shape of tileable and its chunks whatever it's successful or not ~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 688 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 689 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 690 res = graph_execution.execute(retval) 691 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 692 if mock: ~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval) 569 # wait until all the futures completed 570 for future in executed_futures: --> 571 future.result() 572 573 if retval: ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 430 raise CancelledError() 431 elif self._state == FINISHED: --> 432 return self.__get_result() 433 else: 434 raise TimeoutError() ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op) 441 # so we pass the first operand's first output to Executor.handle 442 first_op = ops[0] --> 443 Executor.handle(first_op, results, self._mock) 444 445 # update maximal memory usage during execution ~/Documents/mars_dev/mars/mars/executor.py in handle(cls, op, results, mock) 639 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 640 try: --> 641 return runner(results, op) 642 except UFuncTypeError as e: 643 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op) 321 df = df[op.usecols] 322 else: --> 323 df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) 324 325 ctx[out_df.key] = df ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in _pandas_read_csv(cls, f, op) 272 csv_kwargs['keep_default_na'] = False 273 df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, --> 274 dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) 275 if op.keep_usecols_order: 276 df = df[op.usecols] ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 674 ) 675 --> 676 return _read(filepath_or_buffer, kwds) 677 678 parser_f.__name__ = name ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 452 453 try: --> 454 data = parser.read(nrows) 455 finally: 456 parser.close() ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 1131 def read(self, nrows=None): 1132 nrows = _validate_integer("nrows", nrows) -> 1133 ret = self._engine.read(nrows) 1134 1135 # May alter columns / col_dict ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 2035 def read(self, nrows=None): 2036 try: -> 2037 data = self._reader.read(nrows) 2038 except StopIteration: 2039 if self._first_chunk: pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_column_data() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_tokens() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_with_dtype() ValueError: Integer column has NA values in column 0
ValueError
def execute(cls, ctx, op): xdf = cudf if op.gpu else pd out_df = op.outputs[0] csv_kwargs = op.extra_params.copy() with open_file( op.path, compression=op.compression, storage_options=op.storage_options ) as f: if op.compression is not None: # As we specify names and dtype, we need to skip header rows csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header dtypes = op.outputs[0].dtypes if contain_arrow_dtype(dtypes): # when keep_default_na is True which is default, # will replace null value with np.nan, # which will cause failure when converting to arrow string array csv_kwargs["keep_default_na"] = False csv_kwargs["dtype"] = cls._select_arrow_dtype(dtypes) df = xdf.read_csv( f, sep=op.sep, names=op.names, index_col=op.index_col, usecols=op.usecols, nrows=op.nrows, **csv_kwargs, ) if op.keep_usecols_order: df = df[op.usecols] else: df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) ctx[out_df.key] = df
def execute(cls, ctx, op): xdf = cudf if op.gpu else pd out_df = op.outputs[0] csv_kwargs = op.extra_params.copy() with open_file( op.path, compression=op.compression, storage_options=op.storage_options ) as f: if op.compression is not None: # As we specify names and dtype, we need to skip header rows csv_kwargs["skiprows"] = 1 if op.header == "infer" else op.header dtypes = cls._validate_dtypes(op.outputs[0].dtypes, op.gpu) if contain_arrow_dtype(dtypes.values()): # when keep_default_na is True which is default, # will replace null value with np.nan, # which will cause failure when converting to arrow string array csv_kwargs["keep_default_na"] = False df = xdf.read_csv( f, sep=op.sep, names=op.names, index_col=op.index_col, usecols=op.usecols, dtype=dtypes, nrows=op.nrows, **csv_kwargs, ) if op.keep_usecols_order: df = df[op.usecols] else: df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) ctx[out_df.key] = df
https://github.com/mars-project/mars/issues/1604
In [9]: df = pd.DataFrame({ ...: 'col1': np.random.randint(0, 100, (100000,)), ...: 'col2': np.random.choice(['a', 'b', 'c'], (100000,)), ...: 'col3': np.arange(100000) ...: }) ...: df.iloc[-100:, :] = pd.NA In [10]: df.to_csv('test.csv', index=False) In [11]: md.read_csv('test.csv').execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-560f5a720bd9> in <module> ----> 1 md.read_csv('test.csv').execute() ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~/Documents/mars_dev/mars/mars/core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Documents/mars_dev/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 105 # set number of running cores 106 self.context.set_ncores(kw['n_parallel']) --> 107 res = self._executor.execute_tileables(tileables, **kw) 108 return res 109 ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 876 n_parallel=n_parallel or n_thread, 877 print_progress=print_progress, mock=mock, --> 878 chunk_result=chunk_result) 879 880 # update shape of tileable and its chunks whatever it's successful or not ~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 688 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 689 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 690 res = graph_execution.execute(retval) 691 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 692 if mock: ~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval) 569 # wait until all the futures completed 570 for future in executed_futures: --> 571 future.result() 572 573 if retval: ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 430 raise CancelledError() 431 elif self._state == FINISHED: --> 432 return self.__get_result() 433 else: 434 raise TimeoutError() ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op) 441 # so we pass the first operand's first output to Executor.handle 442 first_op = ops[0] --> 443 Executor.handle(first_op, results, self._mock) 444 445 # update maximal memory usage during execution ~/Documents/mars_dev/mars/mars/executor.py in handle(cls, op, results, mock) 639 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 640 try: --> 641 return runner(results, op) 642 except UFuncTypeError as e: 643 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op) 321 df = df[op.usecols] 322 else: --> 323 df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) 324 325 ctx[out_df.key] = df ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in _pandas_read_csv(cls, f, op) 272 csv_kwargs['keep_default_na'] = False 273 df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, --> 274 dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) 275 if op.keep_usecols_order: 276 df = df[op.usecols] ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 674 ) 675 --> 676 return _read(filepath_or_buffer, kwds) 677 678 parser_f.__name__ = name ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 452 453 try: --> 454 data = parser.read(nrows) 455 finally: 456 parser.close() ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 1131 def read(self, nrows=None): 1132 nrows = _validate_integer("nrows", nrows) -> 1133 ret = self._engine.read(nrows) 1134 1135 # May alter columns / col_dict ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 2035 def read(self, nrows=None): 2036 try: -> 2037 data = self._reader.read(nrows) 2038 except StopIteration: 2039 if self._first_chunk: pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_column_data() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_tokens() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_with_dtype() ValueError: Integer column has NA values in column 0
ValueError
def agg(groupby, func, method="auto", *args, **kwargs): """ Aggregate using one or more operations on grouped data. Parameters ---------- groupby : Mars Groupby Groupby data. func : str or list-like Aggregation functions. method : {'auto', 'shuffle', 'tree'}, default 'auto' 'tree' method provide a better performance, 'shuffle' is recommended if aggregated result is very large, 'auto' will use 'shuffle' method in distributed mode and use 'tree' in local mode. Returns ------- Series or DataFrame Aggregated result. """ # When perform a computation on the grouped data, we won't shuffle # the data in the stage of groupby and do shuffle after aggregation. if not isinstance(groupby, GROUPBY_TYPE): raise TypeError(f"Input should be type of groupby, not {type(groupby)}") if method not in ["shuffle", "tree", "auto"]: raise ValueError( f"Method {method} is not available, please specify 'tree' or 'shuffle" ) if not _check_if_func_available(func): return groupby.transform(func, *args, _call_agg=True, **kwargs) agg_op = DataFrameGroupByAgg( func=func, method=method, raw_func=func, groupby_params=groupby.op.groupby_params, ) return agg_op(groupby)
def agg(groupby, func, method="auto", *args, **kwargs): """ Aggregate using one or more operations on grouped data. :param groupby: Groupby data. :param func: Aggregation functions. :param method: 'shuffle' or 'tree', 'tree' method provide a better performance, 'shuffle' is recommended if aggregated result is very large, 'auto' will use 'shuffle' method in distributed mode and use 'tree' in local mode. :return: Aggregated result. """ # When perform a computation on the grouped data, we won't shuffle # the data in the stage of groupby and do shuffle after aggregation. if not isinstance(groupby, GROUPBY_TYPE): raise TypeError(f"Input should be type of groupby, not {type(groupby)}") if method not in ["shuffle", "tree", "auto"]: raise ValueError( f"Method {method} is not available, please specify 'tree' or 'shuffle" ) if not _check_if_func_available(func): return groupby.transform(func, *args, _call_agg=True, **kwargs) agg_op = DataFrameGroupByAgg( func=func, method=method, raw_func=func, groupby_params=groupby.op.groupby_params, ) return agg_op(groupby)
https://github.com/mars-project/mars/issues/1604
In [9]: df = pd.DataFrame({ ...: 'col1': np.random.randint(0, 100, (100000,)), ...: 'col2': np.random.choice(['a', 'b', 'c'], (100000,)), ...: 'col3': np.arange(100000) ...: }) ...: df.iloc[-100:, :] = pd.NA In [10]: df.to_csv('test.csv', index=False) In [11]: md.read_csv('test.csv').execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-560f5a720bd9> in <module> ----> 1 md.read_csv('test.csv').execute() ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~/Documents/mars_dev/mars/mars/core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Documents/mars_dev/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 105 # set number of running cores 106 self.context.set_ncores(kw['n_parallel']) --> 107 res = self._executor.execute_tileables(tileables, **kw) 108 return res 109 ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 876 n_parallel=n_parallel or n_thread, 877 print_progress=print_progress, mock=mock, --> 878 chunk_result=chunk_result) 879 880 # update shape of tileable and its chunks whatever it's successful or not ~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 688 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 689 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 690 res = graph_execution.execute(retval) 691 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 692 if mock: ~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval) 569 # wait until all the futures completed 570 for future in executed_futures: --> 571 future.result() 572 573 if retval: ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 430 raise CancelledError() 431 elif self._state == FINISHED: --> 432 return self.__get_result() 433 else: 434 raise TimeoutError() ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op) 441 # so we pass the first operand's first output to Executor.handle 442 first_op = ops[0] --> 443 Executor.handle(first_op, results, self._mock) 444 445 # update maximal memory usage during execution ~/Documents/mars_dev/mars/mars/executor.py in handle(cls, op, results, mock) 639 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 640 try: --> 641 return runner(results, op) 642 except UFuncTypeError as e: 643 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op) 321 df = df[op.usecols] 322 else: --> 323 df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) 324 325 ctx[out_df.key] = df ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in _pandas_read_csv(cls, f, op) 272 csv_kwargs['keep_default_na'] = False 273 df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, --> 274 dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) 275 if op.keep_usecols_order: 276 df = df[op.usecols] ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 674 ) 675 --> 676 return _read(filepath_or_buffer, kwds) 677 678 parser_f.__name__ = name ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 452 453 try: --> 454 data = parser.read(nrows) 455 finally: 456 parser.close() ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 1131 def read(self, nrows=None): 1132 nrows = _validate_integer("nrows", nrows) -> 1133 ret = self._engine.read(nrows) 1134 1135 # May alter columns / col_dict ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 2035 def read(self, nrows=None): 2036 try: -> 2037 data = self._reader.read(nrows) 2038 except StopIteration: 2039 if self._first_chunk: pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_column_data() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_tokens() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_with_dtype() ValueError: Integer column has NA values in column 0
ValueError
def dataframe_sort_values( df, by, axis=0, ascending=True, inplace=False, kind="quicksort", na_position="last", ignore_index=False, parallel_kind="PSRS", psrs_kinds=None, ): """ Sort by the values along either axis. Parameters ---------- df : Mars DataFrame Input dataframe. by : str Name or list of names to sort by. axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. parallel_kind : {'PSRS'}, default 'PSRS' Parallel sorting algorithm, for the details, refer to: http://csweb.cs.wfu.edu/bigiron/LittleFE-PSRS/build/html/PSRSalgorithm.html Returns ------- sorted_obj : DataFrame or None DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> import mars.dataframe as md >>> df = md.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df.execute() col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']).execute() col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']).execute() col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False).execute() col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first').execute() col1 col2 col3 3 NaN 8 4 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 """ if na_position not in ["last", "first"]: # pragma: no cover raise TypeError(f"invalid na_position: {na_position}") axis = validate_axis(axis, df) if axis != 0: raise NotImplementedError("Only support sort on axis 0") psrs_kinds = _validate_sort_psrs_kinds(psrs_kinds) by = by if isinstance(by, (list, tuple)) else [by] op = DataFrameSortValues( by=by, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, ignore_index=ignore_index, parallel_kind=parallel_kind, psrs_kinds=psrs_kinds, output_types=[OutputType.dataframe], ) sorted_df = op(df) if inplace: df.data = sorted_df.data else: return sorted_df
def dataframe_sort_values( df, by, axis=0, ascending=True, inplace=False, kind="quicksort", na_position="last", ignore_index=False, parallel_kind="PSRS", psrs_kinds=None, ): """ Sort by the values along either axis. :param df: input DataFrame. :param by: Name or list of names to sort by. :param axis: Axis to be sorted. :param ascending: Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. :param inplace: If True, perform operation in-place. :param kind: Choice of sorting algorithm. See also ndarray.np.sort for more information. mergesort is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. :param na_position: Puts NaNs at the beginning if first; last puts NaNs at the end. :param ignore_index: If True, the resulting axis will be labeled 0, 1, …, n - 1. :param parallel_kind: {'PSRS'}, optional. Parallel sorting algorithm, for the details, refer to: http://csweb.cs.wfu.edu/bigiron/LittleFE-PSRS/build/html/PSRSalgorithm.html :param psrs_kinds: Sorting algorithms during PSRS algorithm. :return: sorted dataframe. Examples -------- >>> import mars.dataframe as md >>> raw = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df = md.DataFrame(raw) >>> df.execute() col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 3 NaN 8 4 4 D 7 2 5 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']).execute() col1 col2 col3 0 A 2 0 1 A 1 1 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']).execute() col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 NaN 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False).execute() col1 col2 col3 4 D 7 2 5 C 4 3 2 B 9 9 0 A 2 0 1 A 1 1 3 NaN 8 4 """ if na_position not in ["last", "first"]: # pragma: no cover raise TypeError(f"invalid na_position: {na_position}") axis = validate_axis(axis, df) if axis != 0: raise NotImplementedError("Only support sort on axis 0") psrs_kinds = _validate_sort_psrs_kinds(psrs_kinds) by = by if isinstance(by, (list, tuple)) else [by] op = DataFrameSortValues( by=by, axis=axis, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, ignore_index=ignore_index, parallel_kind=parallel_kind, psrs_kinds=psrs_kinds, output_types=[OutputType.dataframe], ) sorted_df = op(df) if inplace: df.data = sorted_df.data else: return sorted_df
https://github.com/mars-project/mars/issues/1604
In [9]: df = pd.DataFrame({ ...: 'col1': np.random.randint(0, 100, (100000,)), ...: 'col2': np.random.choice(['a', 'b', 'c'], (100000,)), ...: 'col3': np.arange(100000) ...: }) ...: df.iloc[-100:, :] = pd.NA In [10]: df.to_csv('test.csv', index=False) In [11]: md.read_csv('test.csv').execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-560f5a720bd9> in <module> ----> 1 md.read_csv('test.csv').execute() ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~/Documents/mars_dev/mars/mars/core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Documents/mars_dev/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 105 # set number of running cores 106 self.context.set_ncores(kw['n_parallel']) --> 107 res = self._executor.execute_tileables(tileables, **kw) 108 return res 109 ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 876 n_parallel=n_parallel or n_thread, 877 print_progress=print_progress, mock=mock, --> 878 chunk_result=chunk_result) 879 880 # update shape of tileable and its chunks whatever it's successful or not ~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 688 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 689 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 690 res = graph_execution.execute(retval) 691 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 692 if mock: ~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval) 569 # wait until all the futures completed 570 for future in executed_futures: --> 571 future.result() 572 573 if retval: ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 430 raise CancelledError() 431 elif self._state == FINISHED: --> 432 return self.__get_result() 433 else: 434 raise TimeoutError() ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op) 441 # so we pass the first operand's first output to Executor.handle 442 first_op = ops[0] --> 443 Executor.handle(first_op, results, self._mock) 444 445 # update maximal memory usage during execution ~/Documents/mars_dev/mars/mars/executor.py in handle(cls, op, results, mock) 639 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 640 try: --> 641 return runner(results, op) 642 except UFuncTypeError as e: 643 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op) 321 df = df[op.usecols] 322 else: --> 323 df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) 324 325 ctx[out_df.key] = df ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in _pandas_read_csv(cls, f, op) 272 csv_kwargs['keep_default_na'] = False 273 df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, --> 274 dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) 275 if op.keep_usecols_order: 276 df = df[op.usecols] ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 674 ) 675 --> 676 return _read(filepath_or_buffer, kwds) 677 678 parser_f.__name__ = name ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 452 453 try: --> 454 data = parser.read(nrows) 455 finally: 456 parser.close() ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 1131 def read(self, nrows=None): 1132 nrows = _validate_integer("nrows", nrows) -> 1133 ret = self._engine.read(nrows) 1134 1135 # May alter columns / col_dict ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 2035 def read(self, nrows=None): 2036 try: -> 2037 data = self._reader.read(nrows) 2038 except StopIteration: 2039 if self._first_chunk: pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_column_data() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_tokens() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_with_dtype() ValueError: Integer column has NA values in column 0
ValueError
def einsum( subscripts, *operands, dtype=None, order="K", casting="safe", optimize=False ): """ Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In *implicit* mode `einsum` computes these values. In *explicit* mode, `einsum` provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels. See the notes and examples for clarification. Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : list of array_like These are the arrays for the operation. dtype : {data-type, None}, optional If provided, forces the calculation to use the data type specified. Note that you may have to also give a more liberal `casting` parameter to allow the conversions. Default is None. order : {'C', 'F', 'A', 'K'}, optional Controls the memory layout of the output. 'C' means it should be C contiguous. 'F' means it should be Fortran contiguous, 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. 'K' means it should be as close to the layout as the inputs as is possible, including arbitrarily permuted axes. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. Default is 'safe'. optimize : {False, True, 'greedy', 'optimal'}, optional Controls if intermediate optimization should occur. No optimization will occur if False and True will default to the 'greedy' algorithm. Also accepts an explicit contraction list from the ``np.einsum_path`` function. See ``np.einsum_path`` for more details. Defaults to False. Returns ------- output : Mars.tensor The calculation based on the Einstein summation convention. The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`numpy.trace`. * Return a diagonal, :py:func:`numpy.diag`. * Array axis summations, :py:func:`numpy.sum`. * Transpositions and permutations, :py:func:`numpy.transpose`. * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. * Tensor contractions, :py:func:`numpy.tensordot`. * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``mt.einsum('i,i', a, b)`` is equivalent to :py:func:`mt.inner(a,b) <mars.tensor.inner>`. If a label appears only once, it is not summed, so ``mt.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``mt.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`mt.matmul(a,b) <mars.tensor.matmul>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``mt.einsum('ij', a)`` doesn't affect a 2D array, while ``mt.einsum('ji', a)`` takes its transpose. Additionally, ``mt.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``mt.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``mt.einsum('i->', a)`` is like :py:func:`mt.sum(a, axis=-1) <mars.tensor.sum>`, and ``mt.einsum('ii->i', a)`` is like :py:func:`mt.diag(a) <mars.tensor.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``mt.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``mt.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``mt.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``mt.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``mt.einsum('ii->i', a)`` produces a view (changed in version 1.10.0). `einsum` also provides an alternative way to provide the subscripts and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. If the output shape is not provided in this format `einsum` will be calculated in implicit mode, otherwise it will be performed explicitly. The examples below have corresponding `einsum` calls with the two parameter methods. Examples -------- >>> import mars.tensor as mt >>> a = mt.arange(25).reshape(5,5) >>> b = mt.arange(5) >>> c = mt.arange(6).reshape(2,3) Trace of a matrix: >>> mt.einsum('ii', a).execute() 60 >>> mt.einsum(a, [0,0]).execute() 60 Extract the diagonal (requires explicit form): >>> mt.einsum('ii->i', a).execute() array([ 0, 6, 12, 18, 24]) >>> mt.einsum(a, [0,0], [0]).execute() array([ 0, 6, 12, 18, 24]) >>> mt.diag(a).execute() array([ 0, 6, 12, 18, 24]) Sum over an axis (requires explicit form): >>> mt.einsum('ij->i', a).execute() array([ 10, 35, 60, 85, 110]) >>> mt.einsum(a, [0,1], [0]).execute() array([ 10, 35, 60, 85, 110]) >>> mt.sum(a, axis=1).execute() array([ 10, 35, 60, 85, 110]) For higher dimensional arrays summing a single axis can be done with ellipsis: >>> mt.einsum('...j->...', a).execute() array([ 10, 35, 60, 85, 110]) >>> mt.einsum(a, [Ellipsis,1], [Ellipsis]).execute() array([ 10, 35, 60, 85, 110]) Compute a matrix transpose, or reorder any number of axes: >>> mt.einsum('ji', c).execute() array([[0, 3], [1, 4], [2, 5]]) >>> mt.einsum('ij->ji', c).execute() array([[0, 3], [1, 4], [2, 5]]) >>> mt.einsum(c, [1,0]).execute() array([[0, 3], [1, 4], [2, 5]]) >>> mt.transpose(c).execute() array([[0, 3], [1, 4], [2, 5]]) Vector inner products: >>> mt.einsum('i,i', b, b).execute() 30 >>> mt.einsum(b, [0], b, [0]).execute() 30 >>> mt.inner(b,b).execute() 30 Matrix vector multiplication: >>> mt.einsum('ij,j', a, b).execute() array([ 30, 80, 130, 180, 230]) >>> mt.einsum(a, [0,1], b, [1]).execute() array([ 30, 80, 130, 180, 230]) >>> mt.dot(a, b).execute() array([ 30, 80, 130, 180, 230]) >>> mt.einsum('...j,j', a, b).execute() array([ 30, 80, 130, 180, 230]) Broadcasting and scalar multiplication: >>> mt.einsum('..., ...', 3, c).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) >>> mt.einsum(',ij', 3, c).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) >>> mt.einsum(3, [Ellipsis], c, [Ellipsis]).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) >>> mt.multiply(3, c).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) Vector outer product: >>> mt.einsum('i,j', mt.arange(2)+1, b).execute() array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> mt.einsum(mt.arange(2)+1, [0], b, [1]).execute() array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> mt.outer(mt.arange(2)+1, b).execute() array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) Tensor contraction: >>> a = mt.arange(60.).reshape(3,4,5) >>> b = mt.arange(24.).reshape(4,3,2) >>> mt.einsum('ijk,jil->kl', a, b).execute() array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> mt.einsum(a, [0,1,2], b, [1,0,3], [2,3]).execute() array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> mt.tensordot(a,b, axes=([1,0],[0,1])).execute() array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) Writeable returned arrays (since version 1.10.0): >>> a = mt.zeros((3, 3)) >>> mt.einsum('ii->i', a)[:] = 1 >>> a.execute() array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) Example of ellipsis use: >>> a = mt.arange(6).reshape((3,2)) >>> b = mt.arange(12).reshape((4,3)) >>> mt.einsum('ki,jk->ij', a, b).execute() array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> mt.einsum('ki,...k->i...', a, b).execute() array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> mt.einsum('k...,jk', a, b).execute() array([[10, 28, 46, 64], [13, 40, 67, 94]]) Chained array operations. For more complicated contractions, speed ups might be achieved by repeatedly computing a 'greedy' path or pre-computing the 'optimal' path and repeatedly applying it, using an `einsum_path` insertion (since version 1.12.0). Performance improvements can be particularly significant with larger arrays: >>> a = mt.ones(64).reshape(2,4,8) Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) Sub-optimal `einsum` (due to repeated path calculation time): ~330ms >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') Greedy `einsum` (faster optimal path approximation): ~160ms >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') Optimal `einsum` (best usage pattern in some use cases): ~110ms >>> path = mt.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) """ all_inputs = [subscripts] + list(operands) inputs, outputs, operands = parse_einsum_input(all_inputs) subscripts = "->".join((inputs, outputs)) axes_shape = dict() for axes, op in zip(inputs.split(","), operands): for ax, s in zip(axes, op.shape): axes_shape[ax] = s if optimize: optimize, _ = einsum_path(*all_inputs, optimize=optimize) shape = tuple(axes_shape[ax] for ax in outputs) op = TensorEinsum( subscripts=subscripts, optimize=optimize, dtype=dtype or operands[0].dtype, order=order, casting=casting, ) return op(operands, shape)
def einsum( subscripts, *operands, dtype=None, order="K", casting="safe", optimize=False ): """ Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In *implicit* mode `einsum` computes these values. In *explicit* mode, `einsum` provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels. See the notes and examples for clarification. :param subscripts: Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator ‘->’ is included as well as subscript labels of the precise output form. :param operands: These are the arrays for the operation. :param dtype: If provided, forces the calculation to use the data type specified. Note that you may have to also give a more liberal casting parameter to allow the conversions. Default is None. :param order: Controls the memory layout of the output. :param casting: Controls what kind of data casting may occur. Setting this to ‘unsafe’ is not recommended, as it can adversely affect accumulations. :param optimize: Controls if intermediate optimization should occur. :return: The calculation based on the Einstein summation convention. The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`numpy.trace`. * Return a diagonal, :py:func:`numpy.diag`. * Array axis summations, :py:func:`numpy.sum`. * Transpositions and permutations, :py:func:`numpy.transpose`. * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. * Tensor contractions, :py:func:`numpy.tensordot`. * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``mt.einsum('i,i', a, b)`` is equivalent to :py:func:`mt.inner(a,b) <mars.tensor.inner>`. If a label appears only once, it is not summed, so ``mt.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``mt.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`mt.matmul(a,b) <mars.tensor.matmul>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``mt.einsum('ij', a)`` doesn't affect a 2D array, while ``mt.einsum('ji', a)`` takes its transpose. Additionally, ``mt.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``mt.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``mt.einsum('i->', a)`` is like :py:func:`mt.sum(a, axis=-1) <mars.tensor.sum>`, and ``mt.einsum('ii->i', a)`` is like :py:func:`mt.diag(a) <mars.tensor.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``mt.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``mt.einsum('...ii->...i', a)``. To take the trace along the first and last axes, you can do ``mt.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``mt.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``mt.einsum('ii->i', a)`` produces a view (changed in version 1.10.0). `einsum` also provides an alternative way to provide the subscripts and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. If the output shape is not provided in this format `einsum` will be calculated in implicit mode, otherwise it will be performed explicitly. The examples below have corresponding `einsum` calls with the two parameter methods. Examples -------- >>> import mars.tensor as mt >>> a = mt.arange(25).reshape(5,5) >>> b = mt.arange(5) >>> c = mt.arange(6).reshape(2,3) Trace of a matrix: >>> mt.einsum('ii', a).execute() 60 >>> mt.einsum(a, [0,0]).execute() 60 Extract the diagonal (requires explicit form): >>> mt.einsum('ii->i', a).execute() array([ 0, 6, 12, 18, 24]) >>> mt.einsum(a, [0,0], [0]).execute() array([ 0, 6, 12, 18, 24]) >>> mt.diag(a).execute() array([ 0, 6, 12, 18, 24]) Sum over an axis (requires explicit form): >>> mt.einsum('ij->i', a).execute() array([ 10, 35, 60, 85, 110]) >>> mt.einsum(a, [0,1], [0]).execute() array([ 10, 35, 60, 85, 110]) >>> mt.sum(a, axis=1).execute() array([ 10, 35, 60, 85, 110]) For higher dimensional arrays summing a single axis can be done with ellipsis: >>> mt.einsum('...j->...', a).execute() array([ 10, 35, 60, 85, 110]) >>> mt.einsum(a, [Ellipsis,1], [Ellipsis]).execute() array([ 10, 35, 60, 85, 110]) Compute a matrix transpose, or reorder any number of axes: >>> mt.einsum('ji', c).execute() array([[0, 3], [1, 4], [2, 5]]) >>> mt.einsum('ij->ji', c).execute() array([[0, 3], [1, 4], [2, 5]]) >>> mt.einsum(c, [1,0]).execute() array([[0, 3], [1, 4], [2, 5]]) >>> mt.transpose(c).execute() array([[0, 3], [1, 4], [2, 5]]) Vector inner products: >>> mt.einsum('i,i', b, b).execute() 30 >>> mt.einsum(b, [0], b, [0]).execute() 30 >>> mt.inner(b,b).execute() 30 Matrix vector multiplication: >>> mt.einsum('ij,j', a, b).execute() array([ 30, 80, 130, 180, 230]) >>> mt.einsum(a, [0,1], b, [1]).execute() array([ 30, 80, 130, 180, 230]) >>> mt.dot(a, b).execute() array([ 30, 80, 130, 180, 230]) >>> mt.einsum('...j,j', a, b).execute() array([ 30, 80, 130, 180, 230]) Broadcasting and scalar multiplication: >>> mt.einsum('..., ...', 3, c).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) >>> mt.einsum(',ij', 3, c).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) >>> mt.einsum(3, [Ellipsis], c, [Ellipsis]).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) >>> mt.multiply(3, c).execute() array([[ 0, 3, 6], [ 9, 12, 15]]) Vector outer product: >>> mt.einsum('i,j', mt.arange(2)+1, b).execute() array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> mt.einsum(mt.arange(2)+1, [0], b, [1]).execute() array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) >>> mt.outer(mt.arange(2)+1, b).execute() array([[0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]) Tensor contraction: >>> a = mt.arange(60.).reshape(3,4,5) >>> b = mt.arange(24.).reshape(4,3,2) >>> mt.einsum('ijk,jil->kl', a, b).execute() array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> mt.einsum(a, [0,1,2], b, [1,0,3], [2,3]).execute() array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) >>> mt.tensordot(a,b, axes=([1,0],[0,1])).execute() array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) Writeable returned arrays (since version 1.10.0): >>> a = mt.zeros((3, 3)) >>> mt.einsum('ii->i', a)[:] = 1 >>> a.execute() array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) Example of ellipsis use: >>> a = mt.arange(6).reshape((3,2)) >>> b = mt.arange(12).reshape((4,3)) >>> mt.einsum('ki,jk->ij', a, b).execute() array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> mt.einsum('ki,...k->i...', a, b).execute() array([[10, 28, 46, 64], [13, 40, 67, 94]]) >>> mt.einsum('k...,jk', a, b).execute() array([[10, 28, 46, 64], [13, 40, 67, 94]]) Chained array operations. For more complicated contractions, speed ups might be achieved by repeatedly computing a 'greedy' path or pre-computing the 'optimal' path and repeatedly applying it, using an `einsum_path` insertion (since version 1.12.0). Performance improvements can be particularly significant with larger arrays: >>> a = mt.ones(64).reshape(2,4,8) Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) Sub-optimal `einsum` (due to repeated path calculation time): ~330ms >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') Greedy `einsum` (faster optimal path approximation): ~160ms >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') Optimal `einsum` (best usage pattern in some use cases): ~110ms >>> path = mt.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] >>> for iteration in range(500): ... _ = mt.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) """ all_inputs = [subscripts] + list(operands) inputs, outputs, operands = parse_einsum_input(all_inputs) subscripts = "->".join((inputs, outputs)) axes_shape = dict() for axes, op in zip(inputs.split(","), operands): for ax, s in zip(axes, op.shape): axes_shape[ax] = s if optimize: optimize, _ = einsum_path(*all_inputs, optimize=optimize) shape = tuple(axes_shape[ax] for ax in outputs) op = TensorEinsum( subscripts=subscripts, optimize=optimize, dtype=dtype or operands[0].dtype, order=order, casting=casting, ) return op(operands, shape)
https://github.com/mars-project/mars/issues/1604
In [9]: df = pd.DataFrame({ ...: 'col1': np.random.randint(0, 100, (100000,)), ...: 'col2': np.random.choice(['a', 'b', 'c'], (100000,)), ...: 'col3': np.arange(100000) ...: }) ...: df.iloc[-100:, :] = pd.NA In [10]: df.to_csv('test.csv', index=False) In [11]: md.read_csv('test.csv').execute() --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-560f5a720bd9> in <module> ----> 1 md.read_csv('test.csv').execute() ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 626 627 if wait: --> 628 return run() 629 else: 630 thread_executor = ThreadPoolExecutor(1) ~/Documents/mars_dev/mars/mars/core.py in run() 622 623 def run(): --> 624 self.data.execute(session, **kw) 625 return self 626 ~/Documents/mars_dev/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Documents/mars_dev/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 476 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 477 for t in tileables) --> 478 result = self._sess.run(*tileables, **kw) 479 480 for t in tileables: ~/Documents/mars_dev/mars/mars/session.py in run(self, *tileables, **kw) 105 # set number of running cores 106 self.context.set_ncores(kw['n_parallel']) --> 107 res = self._executor.execute_tileables(tileables, **kw) 108 return res 109 ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 876 n_parallel=n_parallel or n_thread, 877 print_progress=print_progress, mock=mock, --> 878 chunk_result=chunk_result) 879 880 # update shape of tileable and its chunks whatever it's successful or not ~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 688 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 689 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 690 res = graph_execution.execute(retval) 691 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 692 if mock: ~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval) 569 # wait until all the futures completed 570 for future in executed_futures: --> 571 future.result() 572 573 if retval: ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 430 raise CancelledError() 431 elif self._state == FINISHED: --> 432 return self.__get_result() 433 else: 434 raise TimeoutError() ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/envs/py3.7.2/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Documents/mars_dev/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op) 441 # so we pass the first operand's first output to Executor.handle 442 first_op = ops[0] --> 443 Executor.handle(first_op, results, self._mock) 444 445 # update maximal memory usage during execution ~/Documents/mars_dev/mars/mars/executor.py in handle(cls, op, results, mock) 639 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 640 try: --> 641 return runner(results, op) 642 except UFuncTypeError as e: 643 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in execute(cls, ctx, op) 321 df = df[op.usecols] 322 else: --> 323 df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) 324 325 ctx[out_df.key] = df ~/Documents/mars_dev/mars/mars/dataframe/datasource/read_csv.py in _pandas_read_csv(cls, f, op) 272 csv_kwargs['keep_default_na'] = False 273 df = pd.read_csv(b, sep=op.sep, names=op.names, index_col=op.index_col, usecols=usecols, --> 274 dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) 275 if op.keep_usecols_order: 276 df = df[op.usecols] ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision) 674 ) 675 --> 676 return _read(filepath_or_buffer, kwds) 677 678 parser_f.__name__ = name ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds) 452 453 try: --> 454 data = parser.read(nrows) 455 finally: 456 parser.close() ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 1131 def read(self, nrows=None): 1132 nrows = _validate_integer("nrows", nrows) -> 1133 ret = self._engine.read(nrows) 1134 1135 # May alter columns / col_dict ~/miniconda3/envs/py3.7.2/lib/python3.7/site-packages/pandas/io/parsers.py in read(self, nrows) 2035 def read(self, nrows=None): 2036 try: -> 2037 data = self._reader.read(nrows) 2038 except StopIteration: 2039 if self._first_chunk: pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader.read() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._read_rows() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_column_data() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_tokens() pandas/_libs/parsers.pyx in pandas._libs.parsers.TextReader._convert_with_dtype() ValueError: Integer column has NA values in column 0
ValueError
def _wrap_train_tuple(cls, data, label, sample_weight=None, init_score=None): data = cls._convert_tileable(data) label = cls._convert_tileable(label) sample_weight = cls._convert_tileable(sample_weight) init_score = cls._convert_tileable(init_score) return TrainTuple(data, label, sample_weight, init_score)
def _wrap_train_tuple(data, label, sample_weight=None, init_score=None): return TrainTuple(data, label, sample_weight, init_score)
https://github.com/mars-project/mars/issues/1605
In [1]: from mars.learn.contrib import lightgbm as lgb /Users/qinxuye/miniconda3/envs/mars3.6/lib/python3.6/site-packages/lightgbm/__init__.py:48: UserWarning: Starting from version 2.2.1, the library file in distribution wheels for macOS is built by the Apple Clang (Xcode_8.3.3) compiler. This means that in case of installing LightGBM from PyPI via the ``pip install lightgbm`` command, you don't need to install the gcc compiler anymore. Instead of that, you need to install the OpenMP library, which is required for running LightGBM on the system with the Apple Clang compiler. You can install the OpenMP library by the following command: ``brew install libomp``. "You can install the OpenMP library by the following command: ``brew install libomp``.", UserWarning) In [2]: lg_reg = lgb.LGBMRegressor(colsample_bytree=0.3, learning_rate=0.1, ...: max_depth=5, reg_alpha=10, n_estimators=10) In [3]: from sklearn.datasets import make_classification In [4]: x, y = make_classification() In [6]: lg_reg.fit(x, y) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-6-649d7fa4c388> in <module> ----> 1 lg_reg.fit(x, y) ~/Workspace/mars/mars/learn/contrib/lightgbm/regressor.py in fit(self, X, y, sample_weight, init_score, eval_set, eval_sample_weight, eval_init_score, session, run_kwargs, **kwargs) 30 eval_sets=self._wrap_eval_tuples(eval_set, eval_sample_weight, eval_init_score), 31 model_type=LGBMModelType.REGRESSOR, ---> 32 session=session, run_kwargs=run_kwargs, **kwargs) 33 34 self.set_params(**model.get_params()) ~/Workspace/mars/mars/learn/contrib/lightgbm/train.py in train(params, train_set, eval_sets, **kwargs) 323 base_port = kwargs.pop('base_port', None) 324 --> 325 aligns = align_data_set(train_set) 326 for eval_set in eval_sets: 327 aligns += align_data_set(eval_set) ~/Workspace/mars/mars/learn/contrib/lightgbm/align.py in align_data_set(dataset) 104 105 def align_data_set(dataset): --> 106 out_types = get_output_types(dataset.data, dataset.label, dataset.sample_weight, dataset.init_score) 107 op = LGBMAlign(data=dataset.data, label=dataset.label, sample_weight=dataset.sample_weight, 108 init_score=dataset.init_score, output_types=out_types) ~/Workspace/mars/mars/core.py in get_output_types(unknown_as, *objs) 891 output_types.append(unknown_as) 892 else: # pragma: no cover --> 893 raise TypeError('Output can only be tensor, dataframe or series') 894 return output_types TypeError: Output can only be tensor, dataframe or series
TypeError
def predict(self, X, **kw): session = kw.pop("session", None) run_kwargs = kw.pop("run_kwargs", None) X = self._convert_tileable(X) return predict(self, X, session=session, run_kwargs=run_kwargs, **kw)
def predict(self, X, **kw): session = kw.pop("session", None) run_kwargs = kw.pop("run_kwargs", None) return predict(self, X, session=session, run_kwargs=run_kwargs, **kw)
https://github.com/mars-project/mars/issues/1605
In [1]: from mars.learn.contrib import lightgbm as lgb /Users/qinxuye/miniconda3/envs/mars3.6/lib/python3.6/site-packages/lightgbm/__init__.py:48: UserWarning: Starting from version 2.2.1, the library file in distribution wheels for macOS is built by the Apple Clang (Xcode_8.3.3) compiler. This means that in case of installing LightGBM from PyPI via the ``pip install lightgbm`` command, you don't need to install the gcc compiler anymore. Instead of that, you need to install the OpenMP library, which is required for running LightGBM on the system with the Apple Clang compiler. You can install the OpenMP library by the following command: ``brew install libomp``. "You can install the OpenMP library by the following command: ``brew install libomp``.", UserWarning) In [2]: lg_reg = lgb.LGBMRegressor(colsample_bytree=0.3, learning_rate=0.1, ...: max_depth=5, reg_alpha=10, n_estimators=10) In [3]: from sklearn.datasets import make_classification In [4]: x, y = make_classification() In [6]: lg_reg.fit(x, y) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-6-649d7fa4c388> in <module> ----> 1 lg_reg.fit(x, y) ~/Workspace/mars/mars/learn/contrib/lightgbm/regressor.py in fit(self, X, y, sample_weight, init_score, eval_set, eval_sample_weight, eval_init_score, session, run_kwargs, **kwargs) 30 eval_sets=self._wrap_eval_tuples(eval_set, eval_sample_weight, eval_init_score), 31 model_type=LGBMModelType.REGRESSOR, ---> 32 session=session, run_kwargs=run_kwargs, **kwargs) 33 34 self.set_params(**model.get_params()) ~/Workspace/mars/mars/learn/contrib/lightgbm/train.py in train(params, train_set, eval_sets, **kwargs) 323 base_port = kwargs.pop('base_port', None) 324 --> 325 aligns = align_data_set(train_set) 326 for eval_set in eval_sets: 327 aligns += align_data_set(eval_set) ~/Workspace/mars/mars/learn/contrib/lightgbm/align.py in align_data_set(dataset) 104 105 def align_data_set(dataset): --> 106 out_types = get_output_types(dataset.data, dataset.label, dataset.sample_weight, dataset.init_score) 107 op = LGBMAlign(data=dataset.data, label=dataset.label, sample_weight=dataset.sample_weight, 108 init_score=dataset.init_score, output_types=out_types) ~/Workspace/mars/mars/core.py in get_output_types(unknown_as, *objs) 891 output_types.append(unknown_as) 892 else: # pragma: no cover --> 893 raise TypeError('Output can only be tensor, dataframe or series') 894 return output_types TypeError: Output can only be tensor, dataframe or series
TypeError
def kill_process_tree(pid, include_parent=True): try: import psutil except ImportError: # pragma: no cover return try: proc = psutil.Process(pid) except psutil.NoSuchProcess: return plasma_sock_dir = None try: children = proc.children(recursive=True) except psutil.NoSuchProcess: # pragma: no cover return if include_parent: children.append(proc) for p in children: try: if "plasma" in p.name(): plasma_sock_dir = next( ( conn.laddr for conn in p.connections("unix") if "plasma" in conn.laddr ), None, ) p.kill() except psutil.NoSuchProcess: # pragma: no cover pass if plasma_sock_dir: shutil.rmtree(plasma_sock_dir, ignore_errors=True)
def kill_process_tree(pid, include_parent=True): try: import psutil except ImportError: # pragma: no cover return try: proc = psutil.Process(pid) except psutil.NoSuchProcess: return plasma_sock_dir = None children = proc.children(recursive=True) if include_parent: children.append(proc) for p in children: try: if "plasma" in p.name(): plasma_sock_dir = next( ( conn.laddr for conn in p.connections("unix") if "plasma" in conn.laddr ), None, ) p.kill() except psutil.NoSuchProcess: # pragma: no cover pass if plasma_sock_dir: shutil.rmtree(plasma_sock_dir, ignore_errors=True)
https://github.com/mars-project/mars/issues/1605
In [1]: from mars.learn.contrib import lightgbm as lgb /Users/qinxuye/miniconda3/envs/mars3.6/lib/python3.6/site-packages/lightgbm/__init__.py:48: UserWarning: Starting from version 2.2.1, the library file in distribution wheels for macOS is built by the Apple Clang (Xcode_8.3.3) compiler. This means that in case of installing LightGBM from PyPI via the ``pip install lightgbm`` command, you don't need to install the gcc compiler anymore. Instead of that, you need to install the OpenMP library, which is required for running LightGBM on the system with the Apple Clang compiler. You can install the OpenMP library by the following command: ``brew install libomp``. "You can install the OpenMP library by the following command: ``brew install libomp``.", UserWarning) In [2]: lg_reg = lgb.LGBMRegressor(colsample_bytree=0.3, learning_rate=0.1, ...: max_depth=5, reg_alpha=10, n_estimators=10) In [3]: from sklearn.datasets import make_classification In [4]: x, y = make_classification() In [6]: lg_reg.fit(x, y) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-6-649d7fa4c388> in <module> ----> 1 lg_reg.fit(x, y) ~/Workspace/mars/mars/learn/contrib/lightgbm/regressor.py in fit(self, X, y, sample_weight, init_score, eval_set, eval_sample_weight, eval_init_score, session, run_kwargs, **kwargs) 30 eval_sets=self._wrap_eval_tuples(eval_set, eval_sample_weight, eval_init_score), 31 model_type=LGBMModelType.REGRESSOR, ---> 32 session=session, run_kwargs=run_kwargs, **kwargs) 33 34 self.set_params(**model.get_params()) ~/Workspace/mars/mars/learn/contrib/lightgbm/train.py in train(params, train_set, eval_sets, **kwargs) 323 base_port = kwargs.pop('base_port', None) 324 --> 325 aligns = align_data_set(train_set) 326 for eval_set in eval_sets: 327 aligns += align_data_set(eval_set) ~/Workspace/mars/mars/learn/contrib/lightgbm/align.py in align_data_set(dataset) 104 105 def align_data_set(dataset): --> 106 out_types = get_output_types(dataset.data, dataset.label, dataset.sample_weight, dataset.init_score) 107 op = LGBMAlign(data=dataset.data, label=dataset.label, sample_weight=dataset.sample_weight, 108 init_score=dataset.init_score, output_types=out_types) ~/Workspace/mars/mars/core.py in get_output_types(unknown_as, *objs) 891 output_types.append(unknown_as) 892 else: # pragma: no cover --> 893 raise TypeError('Output can only be tensor, dataframe or series') 894 return output_types TypeError: Output can only be tensor, dataframe or series
TypeError
def post_create(self): from ..dispatcher import DispatchActor from ..status import StatusActor super().post_create() self.register_actors_down_handler() self._dispatch_ref = self.promise_ref(DispatchActor.default_uid()) parse_num, is_percent = parse_readable_size(options.worker.min_spill_size) self._min_spill_size = int( self._size_limit * parse_num if is_percent else parse_num ) parse_num, is_percent = parse_readable_size(options.worker.max_spill_size) self._max_spill_size = int( self._size_limit * parse_num if is_percent else parse_num ) status_ref = self.ctx.actor_ref(StatusActor.default_uid()) self._status_ref = status_ref if self.ctx.has_actor(status_ref) else None self._storage_handler = self.storage_client.get_storage_handler( self._storage_device.build_location(self.proc_id) ) self.ref().update_cache_status(_tell=True)
def post_create(self): from ..dispatcher import DispatchActor from ..status import StatusActor super().post_create() self.register_actors_down_handler() self._dispatch_ref = self.promise_ref(DispatchActor.default_uid()) parse_num, is_percent = parse_readable_size(options.worker.min_spill_size) self._min_spill_size = int( self._size_limit * parse_num if is_percent else parse_num ) parse_num, is_percent = parse_readable_size(options.worker.max_spill_size) self._max_spill_size = int( self._size_limit * parse_num if is_percent else parse_num ) status_ref = self.ctx.actor_ref(StatusActor.default_uid()) self._status_ref = status_ref if self.ctx.has_actor(status_ref) else None self._storage_handler = self.storage_client.get_storage_handler( self._storage_device.build_location(self.proc_id) )
https://github.com/mars-project/mars/issues/1605
In [1]: from mars.learn.contrib import lightgbm as lgb /Users/qinxuye/miniconda3/envs/mars3.6/lib/python3.6/site-packages/lightgbm/__init__.py:48: UserWarning: Starting from version 2.2.1, the library file in distribution wheels for macOS is built by the Apple Clang (Xcode_8.3.3) compiler. This means that in case of installing LightGBM from PyPI via the ``pip install lightgbm`` command, you don't need to install the gcc compiler anymore. Instead of that, you need to install the OpenMP library, which is required for running LightGBM on the system with the Apple Clang compiler. You can install the OpenMP library by the following command: ``brew install libomp``. "You can install the OpenMP library by the following command: ``brew install libomp``.", UserWarning) In [2]: lg_reg = lgb.LGBMRegressor(colsample_bytree=0.3, learning_rate=0.1, ...: max_depth=5, reg_alpha=10, n_estimators=10) In [3]: from sklearn.datasets import make_classification In [4]: x, y = make_classification() In [6]: lg_reg.fit(x, y) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-6-649d7fa4c388> in <module> ----> 1 lg_reg.fit(x, y) ~/Workspace/mars/mars/learn/contrib/lightgbm/regressor.py in fit(self, X, y, sample_weight, init_score, eval_set, eval_sample_weight, eval_init_score, session, run_kwargs, **kwargs) 30 eval_sets=self._wrap_eval_tuples(eval_set, eval_sample_weight, eval_init_score), 31 model_type=LGBMModelType.REGRESSOR, ---> 32 session=session, run_kwargs=run_kwargs, **kwargs) 33 34 self.set_params(**model.get_params()) ~/Workspace/mars/mars/learn/contrib/lightgbm/train.py in train(params, train_set, eval_sets, **kwargs) 323 base_port = kwargs.pop('base_port', None) 324 --> 325 aligns = align_data_set(train_set) 326 for eval_set in eval_sets: 327 aligns += align_data_set(eval_set) ~/Workspace/mars/mars/learn/contrib/lightgbm/align.py in align_data_set(dataset) 104 105 def align_data_set(dataset): --> 106 out_types = get_output_types(dataset.data, dataset.label, dataset.sample_weight, dataset.init_score) 107 op = LGBMAlign(data=dataset.data, label=dataset.label, sample_weight=dataset.sample_weight, 108 init_score=dataset.init_score, output_types=out_types) ~/Workspace/mars/mars/core.py in get_output_types(unknown_as, *objs) 891 output_types.append(unknown_as) 892 else: # pragma: no cover --> 893 raise TypeError('Output can only be tensor, dataframe or series') 894 return output_types TypeError: Output can only be tensor, dataframe or series
TypeError
def _tile_chunks(cls, op, in_tensor, faiss_index, n_sample): """ If the distribution on each chunk is the same, refer to: https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-distribute-index-building-on-several-machines 1. train an IndexIVF* on a representative sample of the data, store it. 2. for each node, load the trained index, add the local data to it, store the resulting populated index 3. on a central node, load all the populated indexes and merge them. """ faiss_index_ = faiss.index_factory( in_tensor.shape[1], faiss_index, op.faiss_metric_type ) # Training on sample data when two conditions meet # 1. the index type requires for training, e.g. Flat does not require # 2. distributions of chunks are the same, in not, # train separately on each chunk data need_sample_train = not faiss_index_.is_trained and op.same_distribution need_merge_index = ( hasattr(faiss_index_, "merge_from") if need_sample_train else False ) train_chunk = None if need_sample_train: # sample data to train rs = RandomState(op.seed) sampled_index = rs.choice( in_tensor.shape[0], size=n_sample, replace=False, chunk_size=n_sample ) sample_tensor = recursive_tile(in_tensor[sampled_index]) assert len(sample_tensor.chunks) == 1 sample_chunk = sample_tensor.chunks[0] train_op = FaissTrainSampledIndex( faiss_index=faiss_index, metric=op.metric, return_index_type=op.return_index_type, ) train_chunk = train_op.new_chunk([sample_chunk]) elif op.gpu: # pragma: no cover # if not need train, and on gpu, just merge data together to train in_tensor = in_tensor.rechunk(in_tensor.shape)._inplace_tile() # build index for each input chunk build_index_chunks = [] for i, chunk in enumerate(in_tensor.chunks): build_index_op = op.copy().reset_key() build_index_op._stage = OperandStage.map build_index_op._faiss_index = faiss_index if train_chunk is not None: build_index_chunk = build_index_op.new_chunk( [chunk, train_chunk], index=(i,) ) else: build_index_chunk = build_index_op.new_chunk([chunk], index=(i,)) build_index_chunks.append(build_index_chunk) out_chunks = [] if need_merge_index: assert op.n_sample is not None # merge all indices into one, do only when trained on sample data out_chunk_op = op.copy().reset_key() out_chunk_op._faiss_index = faiss_index out_chunk_op._stage = OperandStage.agg out_chunk = out_chunk_op.new_chunk(build_index_chunks, index=(0,)) out_chunks.append(out_chunk) else: out_chunks.extend(build_index_chunks) new_op = op.copy() return new_op.new_tileables( op.inputs, chunks=out_chunks, nsplits=((len(out_chunks),),) )
def _tile_chunks(cls, op, in_tensor, faiss_index, n_sample): """ If the distribution on each chunk is the same, refer to: https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-distribute-index-building-on-several-machines 1. train an IndexIVF* on a representative sample of the data, store it. 2. for each node, load the trained index, add the local data to it, store the resulting populated index 3. on a central node, load all the populated indexes and merge them. """ faiss_index_ = faiss.index_factory( in_tensor.shape[1], faiss_index, op.faiss_metric_type ) # Training on sample data when two conditions meet # 1. the index type requires for training, e.g. Flat does not require # 2. distributions of chunks are the same, in not, # train separately on each chunk data need_sample_train = not faiss_index_.is_trained and op.same_distribution train_chunk = None if need_sample_train: # sample data to train rs = RandomState(op.seed) sampled_index = rs.choice( in_tensor.shape[0], size=n_sample, replace=False, chunk_size=n_sample ) sample_tensor = recursive_tile(in_tensor[sampled_index]) assert len(sample_tensor.chunks) == 1 sample_chunk = sample_tensor.chunks[0] train_op = FaissTrainSampledIndex( faiss_index=faiss_index, metric=op.metric, return_index_type=op.return_index_type, ) train_chunk = train_op.new_chunk([sample_chunk]) elif op.gpu: # pragma: no cover # if not need train, and on gpu, just merge data together to train in_tensor = in_tensor.rechunk(in_tensor.shape)._inplace_tile() # build index for each input chunk build_index_chunks = [] for i, chunk in enumerate(in_tensor.chunks): build_index_op = op.copy().reset_key() build_index_op._stage = OperandStage.map build_index_op._faiss_index = faiss_index if train_chunk is not None: build_index_chunk = build_index_op.new_chunk( [chunk, train_chunk], index=(i,) ) else: build_index_chunk = build_index_op.new_chunk([chunk], index=(i,)) build_index_chunks.append(build_index_chunk) out_chunks = [] if need_sample_train: assert op.n_sample is not None # merge all indices into one, do only when trained on sample data out_chunk_op = op.copy().reset_key() out_chunk_op._faiss_index = faiss_index out_chunk_op._stage = OperandStage.agg out_chunk = out_chunk_op.new_chunk(build_index_chunks, index=(0,)) out_chunks.append(out_chunk) else: out_chunks.extend(build_index_chunks) new_op = op.copy() return new_op.new_tileables( op.inputs, chunks=out_chunks, nsplits=((len(out_chunks),),) )
https://github.com/mars-project/mars/issues/1608
In [1]: from sklearn.datasets import make_classification In [2]: x, y = make_classification() In [3]: import mars.tensor as mt /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) In [4]: x = mt.tensor(x, chunk_size=20) In [5]: x.shape Out[5]: (100, 20) In [6]: y = mt.tensor(y, chunk_size=20) In [7]: y.shape Out[7]: (100,) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) In [8]: from mars.learn.neighbors._faiss import build_faiss_index In [39]: index = build_faiss_index(x, index_name='PCAR6,IVF8_HNSW32,SQ8', n_samp ...: le=10) In [40]: index.execute() WARNING clustering 10 points to 8 centroids: please provide at least 312 training points --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-40-bd4069985a62> in <module> ----> 1 index.execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 433 raise CancelledError() 434 elif self._state == FINISHED: --> 435 return self.__get_result() 436 else: 437 raise TimeoutError() ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/learn/neighbors/_faiss.py in execute(cls, ctx, op) 340 cls._execute_map(ctx, op) 341 elif op.stage == OperandStage.agg: --> 342 cls._execute_agg(ctx, op) 343 else: 344 assert op.stage is None ~/Workspace/mars/mars/learn/neighbors/_faiss.py in _execute_agg(cls, ctx, op) 327 index = _load_index(ctx, op, index, device_id) 328 indexes.append(index) --> 329 assert hasattr(index, 'merge_from') 330 if merged_index is None: 331 merged_index = index AssertionError:
AssertionError
def _execute_one_chunk(cls, ctx, op): (inp,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True ) with device(device_id): inp = inp.astype(np.float32, copy=False) # create index index = faiss.index_factory(inp.shape[1], op.faiss_index, op.faiss_metric_type) # GPU if device_id >= 0: # pragma: no cover index = _index_to_gpu(index, device_id) # train index if not index.is_trained: assert op.n_sample is not None sample_indices = xp.random.choice( inp.shape[0], size=op.n_sample, replace=False ) sampled = inp[sample_indices] index.train(sampled) if op.metric == "cosine": # faiss does not support cosine distances directly, # data needs to be normalize before adding to index, # refer to: # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance faiss.normalize_L2(inp) # add vectors to index if device_id >= 0: # pragma: no cover # gpu index.add_c(inp.shape[0], _swig_ptr_from_cupy_float32_array(inp)) else: index.add(inp) ctx[op.outputs[0].key] = _store_index(ctx, op, index, device_id)
def _execute_one_chunk(cls, ctx, op): (inp,), device_id, xp = as_same_device( [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True ) with device(device_id): # create index index = faiss.index_factory(inp.shape[1], op.faiss_index, op.faiss_metric_type) # GPU if device_id >= 0: # pragma: no cover index = _index_to_gpu(index, device_id) # train index if not index.is_trained: assert op.n_sample is not None sample_indices = xp.random.choice( inp.shape[0], size=op.n_sample, replace=False ) sampled = inp[sample_indices] index.train(sampled) if op.metric == "cosine": # faiss does not support cosine distances directly, # data needs to be normalize before adding to index, # refer to: # https://github.com/facebookresearch/faiss/wiki/FAQ#how-can-i-index-vectors-for-cosine-distance faiss.normalize_L2(inp) # add vectors to index if device_id >= 0: # pragma: no cover # gpu inp = inp.astype(np.float32, copy=False) index.add_c(inp.shape[0], _swig_ptr_from_cupy_float32_array(inp)) else: index.add(inp) ctx[op.outputs[0].key] = _store_index(ctx, op, index, device_id)
https://github.com/mars-project/mars/issues/1608
In [1]: from sklearn.datasets import make_classification In [2]: x, y = make_classification() In [3]: import mars.tensor as mt /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) In [4]: x = mt.tensor(x, chunk_size=20) In [5]: x.shape Out[5]: (100, 20) In [6]: y = mt.tensor(y, chunk_size=20) In [7]: y.shape Out[7]: (100,) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) /Users/qinxuye/miniconda3/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject return f(*args, **kwds) In [8]: from mars.learn.neighbors._faiss import build_faiss_index In [39]: index = build_faiss_index(x, index_name='PCAR6,IVF8_HNSW32,SQ8', n_samp ...: le=10) In [40]: index.execute() WARNING clustering 10 points to 8 centroids: please provide at least 312 training points --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-40-bd4069985a62> in <module> ----> 1 index.execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 373 374 if wait: --> 375 return run() 376 else: 377 # leverage ThreadPoolExecutor to submit task, ~/Workspace/mars/mars/core.py in run() 368 def run(): 369 # no more fetch, thus just fire run --> 370 session.run(self, **kw) 371 # return Tileable or ExecutableTuple itself 372 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 498 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 499 for t in tileables) --> 500 result = self._sess.run(*tileables, **kw) 501 502 for t in tileables: ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 106 # set number of running cores 107 self.context.set_ncores(kw['n_parallel']) --> 108 res = self._executor.execute_tileables(tileables, **kw) 109 return res 110 ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 433 raise CancelledError() 434 elif self._state == FINISHED: --> 435 return self.__get_result() 436 else: 437 raise TimeoutError() ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/executor.py in handle_op(self, *args, **kw) 376 377 def handle_op(self, *args, **kw): --> 378 return Executor.handle(*args, **kw) 379 380 def _order_starts(self): ~/Workspace/mars/mars/executor.py in handle(cls, op, results, mock) 642 # The `UFuncTypeError` was introduced by numpy#12593 since v1.17.0. 643 try: --> 644 return runner(results, op) 645 except UFuncTypeError as e: 646 raise TypeError(str(e)).with_traceback(sys.exc_info()[2]) from None ~/Workspace/mars/mars/learn/neighbors/_faiss.py in execute(cls, ctx, op) 340 cls._execute_map(ctx, op) 341 elif op.stage == OperandStage.agg: --> 342 cls._execute_agg(ctx, op) 343 else: 344 assert op.stage is None ~/Workspace/mars/mars/learn/neighbors/_faiss.py in _execute_agg(cls, ctx, op) 327 index = _load_index(ctx, op, index, device_id) 328 indexes.append(index) --> 329 assert hasattr(index, 'merge_from') 330 if merged_index is None: 331 merged_index = index AssertionError:
AssertionError
def _make_indexable(iterable): """Ensure iterable supports indexing or convert to an indexable variant. Convert sparse matrices to csr and other non-indexable iterable to arrays. Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged. Parameters ---------- iterable : {list, dataframe, array, sparse} or None Object to be converted to an indexable iterable. """ if issparse(iterable): return mt.tensor(iterable) elif hasattr(iterable, "iloc"): if iterable.ndim == 1: return md.Series(iterable) else: return md.DataFrame(iterable) elif hasattr(iterable, "__getitem__"): return mt.tensor(iterable) elif iterable is None: return iterable return mt.tensor(iterable)
def _make_indexable(iterable): """Ensure iterable supports indexing or convert to an indexable variant. Convert sparse matrices to csr and other non-indexable iterable to arrays. Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged. Parameters ---------- iterable : {list, dataframe, array, sparse} or None Object to be converted to an indexable iterable. """ if issparse(iterable): return mt.tensor(iterable) elif hasattr(iterable, "iloc"): return md.DataFrame(iterable) elif hasattr(iterable, "__getitem__"): return mt.tensor(iterable) elif iterable is None: return iterable return mt.tensor(iterable)
https://github.com/mars-project/mars/issues/1603
In [1]: import mars.dataframe as md In [8]: X = df[['userId', 'rating']] In [9]: y = df['movieId'] In [11]: train_test_split(X, y, train_size=0.7, random_state=0) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-94d1aec833af> in <module> ----> 1 train_test_split(X, y, train_size=0.7, random_state=0) ~/Workspace/mars/mars/learn/model_selection/_split.py in train_test_split(*arrays, **options) 112 raise TypeError(f"Invalid parameters passed: {options}") 113 --> 114 arrays = indexable(*arrays, session=session, run_kwargs=run_kwargs) 115 116 n_samples = _num_samples(arrays[0]) ~/Workspace/mars/mars/learn/utils/validation.py in indexable(session, run_kwargs, *iterables) 139 List of objects to ensure sliceability. 140 """ --> 141 result = [_make_indexable(X) for X in iterables] 142 check_consistent_length(*result, session=session, 143 run_kwargs=run_kwargs) ~/Workspace/mars/mars/learn/utils/validation.py in <listcomp>(.0) 139 List of objects to ensure sliceability. 140 """ --> 141 result = [_make_indexable(X) for X in iterables] 142 check_consistent_length(*result, session=session, 143 run_kwargs=run_kwargs) ~/Workspace/mars/mars/learn/utils/validation.py in _make_indexable(iterable) 119 return mt.tensor(iterable) 120 elif hasattr(iterable, "iloc"): --> 121 return md.DataFrame(iterable) 122 elif hasattr(iterable, "__getitem__"): 123 return mt.tensor(iterable) ~/Workspace/mars/mars/dataframe/initializer.py in __init__(self, data, index, columns, dtype, copy, chunk_size, gpu, sparse) 55 columns=columns, gpu=gpu, sparse=sparse) 56 else: ---> 57 pdf = pd.DataFrame(data, index=index, columns=columns, dtype=dtype, copy=copy) 58 df = from_pandas_df(pdf, chunk_size=chunk_size, gpu=gpu, sparse=sparse) 59 super().__init__(df.data) ~/miniconda3/lib/python3.7/site-packages/pandas/core/frame.py in __init__(self, data, index, columns, dtype, copy) 527 else: 528 if index is None or columns is None: --> 529 raise ValueError("DataFrame constructor not properly called!") 530 531 if not dtype: ValueError: DataFrame constructor not properly called!
ValueError
def execute(cls, ctx, op: "LGBMTrain"): if op.merge: return super().execute(ctx, op) from lightgbm.basic import _safe_call, _LIB data_val = ctx[op.data.key] label_val = ctx[op.label.key] sample_weight_val = ( ctx[op.sample_weight.key] if op.sample_weight is not None else None ) init_score_val = ctx[op.init_score.key] if op.init_score is not None else None if op.eval_datas is None: eval_set, eval_sample_weight, eval_init_score = None, None, None else: eval_set, eval_sample_weight, eval_init_score = [], [], [] for data, label in zip(op.eval_datas, op.eval_labels): eval_set.append((ctx[data.key], ctx[label.key])) for weight in op.eval_sample_weights: eval_sample_weight.append(ctx[weight.key] if weight is not None else None) for score in op.eval_init_scores: eval_init_score.append(ctx[score.key] if score is not None else None) eval_set = eval_set or None eval_sample_weight = eval_sample_weight or None eval_init_score = eval_init_score or None params = op.params.copy() # if model is trained, remove unsupported parameters params.pop("out_dtype_", None) if ctx.running_mode == RunningMode.distributed: params["machines"] = ",".join(op.lgbm_endpoints) params["time_out"] = op.timeout params["num_machines"] = len(op.lgbm_endpoints) params["local_listen_port"] = op.lgbm_port if (op.tree_learner or "").lower() not in {"data", "feature", "voting"}: logger.warning( "Parameter tree_learner not set or set to incorrect value " f'{op.tree_learner}, using "data" as default' ) params["tree_learner"] = "data" else: params["tree_learner"] = op.tree_learner try: model_cls = get_model_cls_from_type(op.model_type) model = model_cls(**params) model.fit( data_val, label_val, sample_weight=sample_weight_val, init_score=init_score_val, eval_set=eval_set, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, **op.kwds, ) if ( op.model_type == LGBMModelType.RANKER or op.model_type == LGBMModelType.REGRESSOR ): model.set_params(out_dtype_=np.dtype("float")) elif hasattr(label_val, "dtype"): model.set_params(out_dtype_=label_val.dtype) else: model.set_params(out_dtype_=label_val.dtypes[0]) ctx[op.outputs[0].key] = pickle.dumps(model) finally: _safe_call(_LIB.LGBM_NetworkFree())
def execute(cls, ctx, op: "LGBMTrain"): if op.merge: return super().execute(ctx, op) from lightgbm.basic import _safe_call, _LIB data_val = ctx[op.data.key] label_val = ctx[op.label.key] sample_weight_val = ( ctx[op.sample_weight.key] if op.sample_weight is not None else None ) init_score_val = ctx[op.init_score.key] if op.init_score is not None else None if op.eval_datas is None: eval_set, eval_sample_weight, eval_init_score = None, None, None else: eval_set, eval_sample_weight, eval_init_score = [], [], [] for data, label in zip(op.eval_datas, op.eval_labels): eval_set.append((ctx[data.key], ctx[label.key])) for weight in op.eval_sample_weights: eval_sample_weight.append(ctx[weight.key] if weight is not None else None) for score in op.eval_init_scores: eval_init_score.append(ctx[score.key] if score is not None else None) eval_set = eval_set or None eval_sample_weight = eval_sample_weight or None eval_init_score = eval_init_score or None params = op.params.copy() if ctx.running_mode == RunningMode.distributed: params["machines"] = ",".join(op.lgbm_endpoints) params["time_out"] = op.timeout params["num_machines"] = len(op.lgbm_endpoints) params["local_listen_port"] = op.lgbm_port if (op.tree_learner or "").lower() not in {"data", "feature", "voting"}: logger.warning( "Parameter tree_learner not set or set to incorrect value " f'{op.tree_learner}, using "data" as default' ) params["tree_learner"] = "data" else: params["tree_learner"] = op.tree_learner try: model_cls = get_model_cls_from_type(op.model_type) model = model_cls(**params) model.fit( data_val, label_val, sample_weight=sample_weight_val, init_score=init_score_val, eval_set=eval_set, eval_sample_weight=eval_sample_weight, eval_init_score=eval_init_score, **op.kwds, ) if ( op.model_type == LGBMModelType.RANKER or op.model_type == LGBMModelType.REGRESSOR ): model.set_params(out_dtype_=np.dtype("float")) elif hasattr(label_val, "dtype"): model.set_params(out_dtype_=label_val.dtype) else: model.set_params(out_dtype_=label_val.dtypes[0]) ctx[op.outputs[0].key] = pickle.dumps(model) finally: _safe_call(_LIB.LGBM_NetworkFree())
https://github.com/mars-project/mars/issues/1597
Attempt 4: Unexpected error TypeError occurred in executing operand affdad0be8e3430b7b6088cd112ed634 in 10.xxx:8083 Traceback (most recent call last): File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/promise.py", line 100, in _wrapped result = func(*args, **kwargs) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/worker/calc.py", line 299, in <lambda> .then(lambda context_dict: _start_calc(context_dict)) \ File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/worker/calc.py", line 273, in _start_calc return self._calc_results(session_id, graph_key, graph, context_dict, chunk_targets) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/worker/calc.py", line 197, in _calc_results chunk_targets, retval=False).result() File "src/gevent/event.py", line 383, in gevent._gevent_cevent.AsyncResult.result File "src/gevent/event.py", line 305, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 335, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 323, in gevent._gevent_cevent.AsyncResult.get File "src/gevent/event.py", line 303, in gevent._gevent_cevent.AsyncResult._raise_exception File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/gevent/_compat.py", line 65, in reraise raise value.with_traceback(tb) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/gevent/threadpool.py", line 142, in __run_task thread_result.set(func(*args, **kwargs)) File "mars/actors/pool/gevent_pool.pyx", line 127, in mars.actors.pool.gevent_pool.GeventThreadPool._wrap_watch.inner result = fn(*args, **kwargs) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/executor.py", line 690, in execute_graph res = graph_execution.execute(retval) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/executor.py", line 571, in execute future.result() File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/concurrent/futures/_base.py", line 435, in result return self.__get_result() File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result raise self._exception File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/executor.py", line 443, in _execute_operand Executor.handle(first_op, results, self._mock) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/executor.py", line 641, in handle return runner(results, op) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/mars/learn/contrib/lightgbm/train.py", line 298, in execute eval_init_score=eval_init_score, **op.kwds) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/sklearn.py", line 760, in fit callbacks=callbacks, init_model=init_model) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/sklearn.py", line 600, in fit callbacks=callbacks, init_model=init_model) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/engine.py", line 231, in train booster = Booster(params=params, train_set=train_set) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/basic.py", line 1983, in __init__ train_set.construct() File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/basic.py", line 1325, in construct categorical_feature=self.categorical_feature, params=self.params) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/basic.py", line 1102, in _lazy_init params_str = param_dict_to_str(params) File "/data/platform/anaconda3/envs/mars-dev/lib/python3.7/site-packages/lightgbm/basic.py", line 156, in param_dict_to_str % (key, type(val).__name__)) TypeError: Unknown type of parameter:out_dtype_, got:dtype
TypeError
def _calc_properties(cls, x1, x2=None, axis="columns"): if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): if x2 is None: dtypes = x1.dtypes elif pd.api.types.is_scalar(x2): dtypes = cls._operator(build_empty_df(x1.dtypes), x2).dtypes elif x1.dtypes is not None and isinstance(x2, TENSOR_TYPE): dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) else: dtypes = x1.dtypes return { "shape": x1.shape, "dtypes": dtypes, "columns_value": x1.columns_value, "index_value": x1.index_value, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): x2_dtype = x2.dtype if hasattr(x2, "dtype") else type(x2) dtype = infer_dtype(x1.dtype, np.dtype(x2_dtype), cls._operator) return {"shape": x1.shape, "dtype": dtype, "index_value": x1.index_value} if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE) ): index_shape, column_shape, dtypes, columns, index = ( np.nan, np.nan, None, None, None, ) if ( x1.columns_value is not None and x2.columns_value is not None and x1.columns_value.key == x2.columns_value.key ): dtypes = pd.Series( [ infer_dtype(dt1, dt2, cls._operator) for dt1, dt2 in zip(x1.dtypes, x2.dtypes) ], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) elif x1.dtypes is not None and x2.dtypes is not None: dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator) columns = parse_index(dtypes.index, store_data=True) columns.value.should_be_monotonic = True column_shape = len(dtypes) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): if axis == "columns" or axis == 1: index_shape = x1.shape[0] index = x1.index_value column_shape, dtypes, columns = np.nan, None, None if x1.columns_value is not None and x1.index_value is not None: if x1.columns_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) else: # pragma: no cover dtypes = x1.dtypes # FIXME columns = infer_index_value(x1.columns_value, x2.index_value) columns.value.should_be_monotonic = True column_shape = np.nan else: assert axis == "index" or axis == 0 column_shape = x1.shape[1] columns = x1.columns_value dtypes = x1.dtypes index_shape, index = np.nan, None if x1.index_value is not None and x1.index_value is not None: if x1.index_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: if x1.dtypes is not None: dtypes = pd.Series( [ infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes ], index=x1.dtypes.index, ) index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True index_shape = np.nan return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): index_shape, dtype, index = np.nan, None, None dtype = infer_dtype(x1.dtype, x2.dtype, cls._operator) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return {"shape": (index_shape,), "dtype": dtype, "index_value": index} raise NotImplementedError("Unknown combination of parameters")
def _calc_properties(cls, x1, x2=None, axis="columns"): if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): if x2 is None: dtypes = x1.dtypes elif pd.api.types.is_scalar(x2): dtypes = infer_dtypes( x1.dtypes, pd.Series(np.array(x2).dtype), cls._operator ) elif x1.dtypes is not None and isinstance(x2, TENSOR_TYPE): dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) else: dtypes = x1.dtypes return { "shape": x1.shape, "dtypes": dtypes, "columns_value": x1.columns_value, "index_value": x1.index_value, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and ( x2 is None or pd.api.types.is_scalar(x2) or isinstance(x2, TENSOR_TYPE) ): x2_dtype = x2.dtype if hasattr(x2, "dtype") else type(x2) dtype = infer_dtype(x1.dtype, np.dtype(x2_dtype), cls._operator) return {"shape": x1.shape, "dtype": dtype, "index_value": x1.index_value} if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE) ): index_shape, column_shape, dtypes, columns, index = ( np.nan, np.nan, None, None, None, ) if ( x1.columns_value is not None and x2.columns_value is not None and x1.columns_value.key == x2.columns_value.key ): dtypes = pd.Series( [ infer_dtype(dt1, dt2, cls._operator) for dt1, dt2 in zip(x1.dtypes, x2.dtypes) ], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) elif x1.dtypes is not None and x2.dtypes is not None: dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator) columns = parse_index(dtypes.index, store_data=True) columns.value.should_be_monotonic = True column_shape = len(dtypes) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (DATAFRAME_TYPE, DATAFRAME_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): if axis == "columns" or axis == 1: index_shape = x1.shape[0] index = x1.index_value column_shape, dtypes, columns = np.nan, None, None if x1.columns_value is not None and x1.index_value is not None: if x1.columns_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) columns = copy.copy(x1.columns_value) columns.value.should_be_monotonic = False column_shape = len(dtypes) else: # pragma: no cover dtypes = x1.dtypes # FIXME columns = infer_index_value(x1.columns_value, x2.index_value) columns.value.should_be_monotonic = True column_shape = np.nan else: assert axis == "index" or axis == 0 column_shape = x1.shape[1] columns = x1.columns_value dtypes = x1.dtypes index_shape, index = np.nan, None if x1.index_value is not None and x1.index_value is not None: if x1.index_value.key == x2.index_value.key: dtypes = pd.Series( [infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes], index=x1.dtypes.index, ) index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: if x1.dtypes is not None: dtypes = pd.Series( [ infer_dtype(dt, x2.dtype, cls._operator) for dt in x1.dtypes ], index=x1.dtypes.index, ) index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True index_shape = np.nan return { "shape": (index_shape, column_shape), "dtypes": dtypes, "columns_value": columns, "index_value": index, } if isinstance(x1, (SERIES_TYPE, SERIES_CHUNK_TYPE)) and isinstance( x2, (SERIES_TYPE, SERIES_CHUNK_TYPE) ): index_shape, dtype, index = np.nan, None, None dtype = infer_dtype(x1.dtype, x2.dtype, cls._operator) if x1.index_value is not None and x2.index_value is not None: if x1.index_value.key == x2.index_value.key: index = copy.copy(x1.index_value) index.value.should_be_monotonic = False index_shape = x1.shape[0] else: index = infer_index_value(x1.index_value, x2.index_value) index.value.should_be_monotonic = True if index.key == x1.index_value.key == x2.index_value.key and ( not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0]) ): index_shape = ( x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0] ) return {"shape": (index_shape,), "dtype": dtype, "index_value": index} raise NotImplementedError("Unknown combination of parameters")
https://github.com/mars-project/mars/issues/1590
import numpy as np import pandas as pd import mars.dataframe as md rs = np.random.RandomState(0) raw_df = rs.rand(20, 10) raw_df = pd.DataFrame(np.where(raw_df > 0.4, raw_df, np.nan), columns=list('ABCDEFGHIJ')) df = md.DataFrame(raw_df, chunk_size=6) raw_df2 = rs.rand(20, 10) raw_df2 = pd.DataFrame(np.where(raw_df2 > 0.4, raw_df2, np.nan), columns=list('ABCDEFGHIJ')) df2 = md.DataFrame(raw_df2, chunk_size=4) sumv = (df ** 2).mul(df2, axis=1, fill_value=0).sum(axis=0).execute() Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3417, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-16-f7addf55e6d4>", line 1, in <module> sumv = (df ** 2).mul(df2, axis=1, fill_value=0).sum(axis=0).execute() File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/__init__.py", line 118, in call_df_fill return func(df, other, axis=axis, level=level, fill_value=fill_value) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/multiply.py", line 48, in mul return op(df, other) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/core.py", line 499, in __call__ return self._call(x1, x2) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/core.py", line 479, in _call kw = self._calc_properties(df1, df2, axis=self.axis) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/core.py", line 326, in _calc_properties dtypes = pd.Series([infer_dtype(dt1, dt2, cls._operator) for dt1, dt2 File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/series.py", line 313, in __init__ raise ValueError( ValueError: Length of passed values is 10, index implies 11.
ValueError
def build_df(df_obj, fill_value=1, size=1): empty_df = build_empty_df(df_obj.dtypes, index=df_obj.index_value.to_pandas()[:0]) dtypes = empty_df.dtypes record = [_generate_value(dtype, fill_value) for dtype in dtypes] if len(record) != 0: # columns is empty in some cases if isinstance(empty_df.index, pd.MultiIndex): index = tuple( _generate_value(level.dtype, fill_value) for level in empty_df.index.levels ) empty_df = empty_df.reindex( index=pd.MultiIndex.from_tuples([index], names=empty_df.index.names) ) empty_df.iloc[0] = record else: index = _generate_value(empty_df.index.dtype, fill_value) empty_df.loc[index] = record empty_df = pd.concat([empty_df] * size) # make sure dtypes correct for MultiIndex for i, dtype in enumerate(dtypes.tolist()): s = empty_df.iloc[:, i] if not pd.api.types.is_dtype_equal(s.dtype, dtype): empty_df.iloc[:, i] = s.astype(dtype) return empty_df
def build_df(df_obj, fill_value=1, size=1): empty_df = build_empty_df(df_obj.dtypes, index=df_obj.index_value.to_pandas()[:0]) dtypes = empty_df.dtypes record = [_generate_value(dtype, fill_value) for dtype in dtypes] if isinstance(empty_df.index, pd.MultiIndex): index = tuple( _generate_value(level.dtype, fill_value) for level in empty_df.index.levels ) empty_df = empty_df.reindex( index=pd.MultiIndex.from_tuples([index], names=empty_df.index.names) ) empty_df.iloc[0] = record else: index = _generate_value(empty_df.index.dtype, fill_value) empty_df.loc[index] = record empty_df = pd.concat([empty_df] * size) # make sure dtypes correct for MultiIndex for i, dtype in enumerate(dtypes.tolist()): s = empty_df.iloc[:, i] if not pd.api.types.is_dtype_equal(s.dtype, dtype): empty_df.iloc[:, i] = s.astype(dtype) return empty_df
https://github.com/mars-project/mars/issues/1590
import numpy as np import pandas as pd import mars.dataframe as md rs = np.random.RandomState(0) raw_df = rs.rand(20, 10) raw_df = pd.DataFrame(np.where(raw_df > 0.4, raw_df, np.nan), columns=list('ABCDEFGHIJ')) df = md.DataFrame(raw_df, chunk_size=6) raw_df2 = rs.rand(20, 10) raw_df2 = pd.DataFrame(np.where(raw_df2 > 0.4, raw_df2, np.nan), columns=list('ABCDEFGHIJ')) df2 = md.DataFrame(raw_df2, chunk_size=4) sumv = (df ** 2).mul(df2, axis=1, fill_value=0).sum(axis=0).execute() Traceback (most recent call last): File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3417, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-16-f7addf55e6d4>", line 1, in <module> sumv = (df ** 2).mul(df2, axis=1, fill_value=0).sum(axis=0).execute() File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/__init__.py", line 118, in call_df_fill return func(df, other, axis=axis, level=level, fill_value=fill_value) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/multiply.py", line 48, in mul return op(df, other) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/core.py", line 499, in __call__ return self._call(x1, x2) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/core.py", line 479, in _call kw = self._calc_properties(df1, df2, axis=self.axis) File "/Users/wenjun.swj/Code/mars/mars/dataframe/arithmetic/core.py", line 326, in _calc_properties dtypes = pd.Series([infer_dtype(dt1, dt2, cls._operator) for dt1, dt2 File "/Users/wenjun.swj/miniconda3/lib/python3.8/site-packages/pandas/core/series.py", line 313, in __init__ raise ValueError( ValueError: Length of passed values is 10, index implies 11.
ValueError
def fetch(self, *tileables, **kw): ret_list = False if len(tileables) == 1 and isinstance(tileables[0], (tuple, list)): ret_list = True tileables = tileables[0] elif len(tileables) > 1: ret_list = True result = self._sess.fetch(*tileables, **kw) ret = [] for r, t in zip(result, tileables): if hasattr(t, "isscalar") and t.isscalar() and getattr(r, "size", None) == 1: ret.append(r.item()) else: ret.append(r) if ret_list: return ret return ret[0]
def fetch(self, *tileables, **kw): ret_list = False if len(tileables) == 1 and isinstance(tileables[0], (tuple, list)): ret_list = True tileables = tileables[0] elif len(tileables) > 1: ret_list = True result = self._sess.fetch(*tileables, **kw) ret = [] for r, t in zip(result, tileables): if hasattr(t, "isscalar") and t.isscalar() and hasattr(r, "item"): ret.append(r.item()) else: ret.append(r) if ret_list: return ret return ret[0]
https://github.com/mars-project/mars/issues/1580
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) ~/.local/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj) 700 type_pprinters=self.type_printers, 701 deferred_pprinters=self.deferred_printers) --> 702 printer.pretty(obj) 703 printer.flush() 704 return stream.getvalue() ~/.local/lib/python3.6/site-packages/IPython/lib/pretty.py in pretty(self, obj) 392 if cls is not object \ 393 and callable(cls.__dict__.get('__repr__')): --> 394 return _repr_pprint(obj, self, cycle) 395 396 return _default_pprint(obj, self, cycle) ~/.local/lib/python3.6/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle) 682 """A pprint that just redirects to the normal repr function.""" 683 # Find newlines and replace them with p.break_() --> 684 output = repr(obj) 685 lines = output.splitlines() 686 with p.group(): ~/.local/lib/python3.6/site-packages/mars/core.py in __repr__(self) 127 128 def __repr__(self): --> 129 return self._data.__repr__() 130 131 def _check_data(self, data): ~/.local/lib/python3.6/site-packages/mars/tensor/core.py in __repr__(self) 177 178 def __repr__(self): --> 179 return self._to_str(representation=True) 180 181 @property ~/.local/lib/python3.6/site-packages/mars/tensor/core.py in _to_str(self, representation) 165 threshold = print_options['threshold'] 166 --> 167 corner_data = fetch_corner_data(self, session=self._executed_sessions[-1]) 168 # if less than default threshold, just set it as default, 169 # if not, set to corner_data.size - 1 make sure ... exists in repr ~/.local/lib/python3.6/site-packages/mars/tensor/utils.py in fetch_corner_data(tensor, session) 824 return np.block(corners.tolist()) 825 else: --> 826 return tensor.fetch(session=session) ~/.local/lib/python3.6/site-packages/mars/core.py in fetch(self, session, **kw) 373 if session is None: 374 session = Session.default_or_local() --> 375 return session.fetch(self, **kw) 376 377 def _attach_session(self, session): ~/.local/lib/python3.6/site-packages/mars/session.py in fetch(self, *tileables, **kw) 494 for r, t in zip(result, tileables): 495 if hasattr(t, 'isscalar') and t.isscalar() and hasattr(r, 'item'): --> 496 ret.append(r.item()) 497 else: 498 ret.append(r) ValueError: can only convert an array of size 1 to a Python scalar
ValueError
def swapaxes(a, axis1, axis2): """ Interchange two axes of a tensor. Parameters ---------- a : array_like Input tensor. axis1 : int First axis. axis2 : int Second axis. Returns ------- a_swapped : Tensor If `a` is a Tensor, then a view of `a` is returned; otherwise a new tensor is created. Examples -------- >>> import mars.tensor as mt >>> x = mt.array([[1,2,3]]) >>> mt.swapaxes(x,0,1).execute() array([[1], [2], [3]]) >>> x = mt.array([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> x.execute() array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> mt.swapaxes(x,0,2).execute() array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]]) """ a = astensor(a) axis1 = validate_axis(a.ndim, axis1) axis2 = validate_axis(a.ndim, axis2) if axis1 == axis2: return a op = TensorSwapAxes(axis1, axis2, dtype=a.dtype, sparse=a.issparse()) return op(a)
def swapaxes(a, axis1, axis2): """ Interchange two axes of a tensor. Parameters ---------- a : array_like Input tensor. axis1 : int First axis. axis2 : int Second axis. Returns ------- a_swapped : Tensor If `a` is a Tensor, then a view of `a` is returned; otherwise a new tensor is created. Examples -------- >>> import mars.tensor as mt >>> x = mt.array([[1,2,3]]) >>> mt.swapaxes(x,0,1).execute() array([[1], [2], [3]]) >>> x = mt.array([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> x.execute() array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> mt.swapaxes(x,0,2).execute() array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]]) """ axis1 = validate_axis(a.ndim, axis1) axis2 = validate_axis(a.ndim, axis2) if axis1 == axis2: return a op = TensorSwapAxes(axis1, axis2, dtype=a.dtype, sparse=a.issparse()) return op(a)
https://github.com/mars-project/mars/issues/1552
In [35]: p = np.random.rand(3,4,5) In [36]: mt.swapaxes(p, 0, -1) --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-36-016cb9916fdb> in <module> ----> 1 mt.swapaxes(p, 0, -1) ~/anaconda3/envs/pymars0.6/lib/python3.7/site-packages/mars/tensor/base/swapaxes.py in swapaxes(a, axis1, axis2) 150 return a 151 --> 152 op = TensorSwapAxes(axis1, axis2, dtype=a.dtype, sparse=a.issparse()) 153 return op(a) AttributeError: 'numpy.ndarray' object has no attribute 'issparse'
AttributeError
def yield_execution_pool(self): actor_cls = self.get("_actor_cls") actor_uid = self.get("_actor_uid") op_key = self.get("_op_key") if not actor_cls or not actor_uid: # pragma: no cover return from .actors import new_client from .actors.errors import ActorAlreadyExist from .worker.daemon import WorkerDaemonActor client = new_client() worker_addr = self.get_local_address() if client.has_actor( client.actor_ref(WorkerDaemonActor.default_uid(), address=worker_addr) ): holder = client.actor_ref(WorkerDaemonActor.default_uid(), address=worker_addr) else: holder = client while True: try: random_tail = "".join( random.choice(string.ascii_letters + string.digits) for _ in range(5) ) uid = f"w:0:mars-cpu-calc-backup-{os.getpid()}-{op_key}-{random_tail}" uid = self._actor_ctx.distributor.make_same_process(uid, actor_uid) ref = holder.create_actor(actor_cls, uid=uid, address=worker_addr) break except ActorAlreadyExist: # pragma: no cover pass return ref
def yield_execution_pool(self): actor_cls = self.get("_actor_cls") actor_uid = self.get("_actor_uid") op_key = self.get("_op_key") if not actor_cls or not actor_uid: # pragma: no cover return from .actors import new_client from .worker.daemon import WorkerDaemonActor client = new_client() worker_addr = self.get_local_address() if client.has_actor( client.actor_ref(WorkerDaemonActor.default_uid(), address=worker_addr) ): holder = client.actor_ref(WorkerDaemonActor.default_uid(), address=worker_addr) else: holder = client uid = f"w:0:mars-cpu-calc-backup-{os.getpid()}-{op_key}-{random.randint(-1, 9999)}" uid = self._actor_ctx.distributor.make_same_process(uid, actor_uid) ref = holder.create_actor(actor_cls, uid=uid, address=worker_addr) return ref
https://github.com/mars-project/mars/issues/1543
Traceback (most recent call last): File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun/Code/mars/mars/dataframe/groupby/tests/test_groupby_execution.py", line 401, in testGroupByApply pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0], File "/Users/wenjun/Code/mars/mars/tests/core.py", line 686, in execute_tileable result = super().execute_tileable(tileable, *args, **kwargs) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 720, in execute_tileable ret = self.execute_graph(chunk_graph, result_keys, n_parallel=n_parallel or n_thread, File "/Users/wenjun/Code/mars/mars/tests/core.py", line 673, in execute_graph return super().execute_graph(graph, keys, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun/Code/mars/mars/tests/core.py", line 591, in _execute_operand super()._execute_operand(op) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun/Code/mars/mars/dataframe/groupby/apply.py", line 63, in execute assert len(applied.index) == 1 AssertionError
AssertionError
def _call_dataframe(self, df, dtypes=None, index=None): dtypes, index_value = self._infer_df_func_returns(df, dtypes, index) if index_value is None: index_value = parse_index(None, (df.key, df.index_value.key)) for arg, desc in zip((self.output_types, dtypes), ("output_types", "dtypes")): if arg is None: raise TypeError( f"Cannot determine {desc} by calculating with enumerate data, " "please specify it as arguments" ) if index_value == "inherit": index_value = df.index_value if self._elementwise: shape = df.shape elif self.output_types[0] == OutputType.dataframe: shape = [np.nan, np.nan] shape[1 - self.axis] = df.shape[1 - self.axis] shape = tuple(shape) else: shape = (df.shape[1 - self.axis],) if self.output_types[0] == OutputType.dataframe: if self.axis == 0: return self.new_dataframe( [df], shape=shape, dtypes=dtypes, index_value=index_value, columns_value=parse_index(dtypes.index), ) else: return self.new_dataframe( [df], shape=shape, dtypes=dtypes, index_value=df.index_value, columns_value=parse_index(dtypes.index), ) else: return self.new_series([df], shape=shape, dtype=dtypes, index_value=index_value)
def _call_dataframe(self, df, dtypes=None, index=None): dtypes, index_value = self._infer_df_func_returns(df, dtypes, index) for arg, desc in zip( (self.output_types, dtypes, index_value), ("output_types", "dtypes", "index") ): if arg is None: raise TypeError( f"Cannot determine {desc} by calculating with enumerate data, " "please specify it as arguments" ) if index_value == "inherit": index_value = df.index_value if self._elementwise: shape = df.shape elif self.output_types[0] == OutputType.dataframe: shape = [np.nan, np.nan] shape[1 - self.axis] = df.shape[1 - self.axis] shape = tuple(shape) else: shape = (df.shape[1 - self.axis],) if self.output_types[0] == OutputType.dataframe: if self.axis == 0: return self.new_dataframe( [df], shape=shape, dtypes=dtypes, index_value=index_value, columns_value=parse_index(dtypes.index), ) else: return self.new_dataframe( [df], shape=shape, dtypes=dtypes, index_value=df.index_value, columns_value=parse_index(dtypes.index), ) else: return self.new_series([df], shape=shape, dtype=dtypes, index_value=index_value)
https://github.com/mars-project/mars/issues/1543
Traceback (most recent call last): File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun/Code/mars/mars/dataframe/groupby/tests/test_groupby_execution.py", line 401, in testGroupByApply pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0], File "/Users/wenjun/Code/mars/mars/tests/core.py", line 686, in execute_tileable result = super().execute_tileable(tileable, *args, **kwargs) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 720, in execute_tileable ret = self.execute_graph(chunk_graph, result_keys, n_parallel=n_parallel or n_thread, File "/Users/wenjun/Code/mars/mars/tests/core.py", line 673, in execute_graph return super().execute_graph(graph, keys, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun/Code/mars/mars/tests/core.py", line 591, in _execute_operand super()._execute_operand(op) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun/Code/mars/mars/dataframe/groupby/apply.py", line 63, in execute assert len(applied.index) == 1 AssertionError
AssertionError
def df_apply( df, func, axis=0, raw=False, result_type=None, args=(), dtypes=None, output_type=None, index=None, elementwise=None, **kwds, ): if isinstance(func, (list, dict)): return df.aggregate(func) output_types = kwds.pop("output_types", None) object_type = kwds.pop("object_type", None) output_types = validate_output_types( output_type=output_type, output_types=output_types, object_type=object_type ) output_type = output_types[0] if output_types else None # calling member function if isinstance(func, str): func = getattr(df, func) sig = inspect.getfullargspec(func) if "axis" in sig.args: kwds["axis"] = axis return func(*args, **kwds) op = ApplyOperand( func=func, axis=axis, raw=raw, result_type=result_type, args=args, kwds=kwds, output_types=output_type, elementwise=elementwise, ) return op(df, dtypes=dtypes, index=index)
def df_apply( df, func, axis=0, raw=False, result_type=None, args=(), dtypes=None, output_type=None, index=None, elementwise=None, **kwds, ): if isinstance(func, (list, dict)): return df.aggregate(func) if isinstance(output_type, str): output_type = getattr(OutputType, output_type.lower()) # calling member function if isinstance(func, str): func = getattr(df, func) sig = inspect.getfullargspec(func) if "axis" in sig.args: kwds["axis"] = axis return func(*args, **kwds) op = ApplyOperand( func=func, axis=axis, raw=raw, result_type=result_type, args=args, kwds=kwds, output_type=output_type, elementwise=elementwise, ) return op(df, dtypes=dtypes, index=index)
https://github.com/mars-project/mars/issues/1543
Traceback (most recent call last): File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun/Code/mars/mars/dataframe/groupby/tests/test_groupby_execution.py", line 401, in testGroupByApply pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0], File "/Users/wenjun/Code/mars/mars/tests/core.py", line 686, in execute_tileable result = super().execute_tileable(tileable, *args, **kwargs) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 720, in execute_tileable ret = self.execute_graph(chunk_graph, result_keys, n_parallel=n_parallel or n_thread, File "/Users/wenjun/Code/mars/mars/tests/core.py", line 673, in execute_graph return super().execute_graph(graph, keys, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun/Code/mars/mars/tests/core.py", line 591, in _execute_operand super()._execute_operand(op) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun/Code/mars/mars/dataframe/groupby/apply.py", line 63, in execute assert len(applied.index) == 1 AssertionError
AssertionError
def _infer_df_func_returns(self, in_groupby, in_df, dtypes, index): index_value, output_type, new_dtypes = None, None, None try: if in_df.op.output_types[0] == OutputType.dataframe: test_df = build_df(in_df, size=2) else: test_df = build_series(in_df, size=2, name=in_df.name) selection = getattr(in_groupby.op, "selection", None) if selection: test_df = test_df[selection] with np.errstate(all="ignore"): infer_df = self.func(test_df, *self.args, **self.kwds) # todo return proper index when sort=True is implemented index_value = parse_index(None, in_df.key, self.func) if infer_df is None: output_type = get_output_types(in_df)[0] index_value = parse_index(pd.Index([], dtype=np.object)) if output_type == OutputType.dataframe: new_dtypes = pd.Series([], index=pd.Index([])) else: new_dtypes = (None, np.dtype("O")) elif isinstance(infer_df, pd.DataFrame): output_type = output_type or OutputType.dataframe new_dtypes = new_dtypes or infer_df.dtypes elif isinstance(infer_df, pd.Series): output_type = output_type or OutputType.series new_dtypes = new_dtypes or (infer_df.name, infer_df.dtype) else: output_type = OutputType.series new_dtypes = (None, pd.Series(infer_df).dtype) except: # noqa: E722 # nosec pass self.output_types = [output_type] if not self.output_types else self.output_types dtypes = new_dtypes if dtypes is None else dtypes index_value = index_value if index is None else parse_index(index) return dtypes, index_value
def _infer_df_func_returns(self, in_groupby, in_df, dtypes, index): index_value, output_type, new_dtypes = None, None, None try: if in_df.op.output_types[0] == OutputType.dataframe: test_df = build_df(in_df, size=2) else: test_df = build_series(in_df, size=2, name=in_df.name) selection = getattr(in_groupby.op, "selection", None) if selection: test_df = test_df[selection] with np.errstate(all="ignore"): infer_df = self.func(test_df, *self.args, **self.kwds) # todo return proper index when sort=True is implemented index_value = parse_index(None, in_df.key, self.func) if isinstance(infer_df, pd.DataFrame): output_type = output_type or OutputType.dataframe new_dtypes = new_dtypes or infer_df.dtypes elif isinstance(infer_df, pd.Series): output_type = output_type or OutputType.series new_dtypes = new_dtypes or (infer_df.name, infer_df.dtype) else: output_type = OutputType.series new_dtypes = (None, pd.Series(infer_df).dtype) except: # noqa: E722 # nosec pass self.output_types = [output_type] if not self.output_types else self.output_types dtypes = new_dtypes if dtypes is None else dtypes index_value = index_value if index is None else parse_index(index) return dtypes, index_value
https://github.com/mars-project/mars/issues/1543
Traceback (most recent call last): File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun/Code/mars/mars/dataframe/groupby/tests/test_groupby_execution.py", line 401, in testGroupByApply pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0], File "/Users/wenjun/Code/mars/mars/tests/core.py", line 686, in execute_tileable result = super().execute_tileable(tileable, *args, **kwargs) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 720, in execute_tileable ret = self.execute_graph(chunk_graph, result_keys, n_parallel=n_parallel or n_thread, File "/Users/wenjun/Code/mars/mars/tests/core.py", line 673, in execute_graph return super().execute_graph(graph, keys, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun/Code/mars/mars/tests/core.py", line 591, in _execute_operand super()._execute_operand(op) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun/Code/mars/mars/dataframe/groupby/apply.py", line 63, in execute assert len(applied.index) == 1 AssertionError
AssertionError
def __call__(self, groupby, dtypes=None, index=None): in_df = groupby while in_df.op.output_types[0] not in (OutputType.dataframe, OutputType.series): in_df = in_df.inputs[0] dtypes, index_value = self._infer_df_func_returns(groupby, in_df, dtypes, index) if index_value is None: index_value = parse_index(None, (in_df.key, in_df.index_value.key)) for arg, desc in zip((self.output_types, dtypes), ("output_types", "dtypes")): if arg is None: raise TypeError( f"Cannot determine {desc} by calculating with enumerate data, " "please specify it as arguments" ) if self.output_types[0] == OutputType.dataframe: new_shape = (np.nan, len(dtypes)) return self.new_dataframe( [groupby], shape=new_shape, dtypes=dtypes, index_value=index_value, columns_value=parse_index(dtypes.index, store_data=True), ) else: name, dtype = dtypes new_shape = (np.nan,) return self.new_series( [groupby], name=name, shape=new_shape, dtype=dtype, index_value=index_value )
def __call__(self, groupby, dtypes=None, index=None): in_df = groupby while in_df.op.output_types[0] not in (OutputType.dataframe, OutputType.series): in_df = in_df.inputs[0] dtypes, index_value = self._infer_df_func_returns(groupby, in_df, dtypes, index) for arg, desc in zip( (self.output_types, dtypes, index_value), ("output_types", "dtypes", "index") ): if arg is None: raise TypeError( f"Cannot determine {desc} by calculating with enumerate data, " "please specify it as arguments" ) if self.output_types[0] == OutputType.dataframe: new_shape = (np.nan, len(dtypes)) return self.new_dataframe( [groupby], shape=new_shape, dtypes=dtypes, index_value=index_value, columns_value=parse_index(dtypes.index, store_data=True), ) else: name, dtype = dtypes new_shape = (np.nan,) return self.new_series( [groupby], name=name, shape=new_shape, dtype=dtype, index_value=index_value )
https://github.com/mars-project/mars/issues/1543
Traceback (most recent call last): File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun/Code/mars/mars/dataframe/groupby/tests/test_groupby_execution.py", line 401, in testGroupByApply pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0], File "/Users/wenjun/Code/mars/mars/tests/core.py", line 686, in execute_tileable result = super().execute_tileable(tileable, *args, **kwargs) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 720, in execute_tileable ret = self.execute_graph(chunk_graph, result_keys, n_parallel=n_parallel or n_thread, File "/Users/wenjun/Code/mars/mars/tests/core.py", line 673, in execute_graph return super().execute_graph(graph, keys, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun/Code/mars/mars/tests/core.py", line 591, in _execute_operand super()._execute_operand(op) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun/Code/mars/mars/dataframe/groupby/apply.py", line 63, in execute assert len(applied.index) == 1 AssertionError
AssertionError
def groupby_apply( groupby, func, *args, dtypes=None, index=None, output_type=None, **kwargs ): # todo this can be done with sort_index implemented if not groupby.op.groupby_params.get("as_index", True): raise NotImplementedError("apply when set_index == False is not supported") output_types = kwargs.pop("output_types", None) object_type = kwargs.pop("object_type", None) output_types = validate_output_types( output_types=output_types, output_type=output_type, object_type=object_type ) op = GroupByApply(func=func, args=args, kwds=kwargs, output_types=output_types) return op(groupby, dtypes=dtypes, index=index)
def groupby_apply( groupby, func, *args, dtypes=None, index=None, output_types=None, **kwargs ): # todo this can be done with sort_index implemented if not groupby.op.groupby_params.get("as_index", True): raise NotImplementedError("apply when set_index == False is not supported") op = GroupByApply(func=func, args=args, kwds=kwargs, output_types=output_types) return op(groupby, dtypes=dtypes, index=index)
https://github.com/mars-project/mars/issues/1543
Traceback (most recent call last): File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 60, in testPartExecutor yield File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 676, in run self._callTestMethod(testMethod) File "/Users/wenjun/miniconda3/lib/python3.8/unittest/case.py", line 633, in _callTestMethod method() File "/Users/wenjun/Code/mars/mars/dataframe/groupby/tests/test_groupby_execution.py", line 401, in testGroupByApply pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0], File "/Users/wenjun/Code/mars/mars/tests/core.py", line 686, in execute_tileable result = super().execute_tileable(tileable, *args, **kwargs) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 720, in execute_tileable ret = self.execute_graph(chunk_graph, result_keys, n_parallel=n_parallel or n_thread, File "/Users/wenjun/Code/mars/mars/tests/core.py", line 673, in execute_graph return super().execute_graph(graph, keys, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 693, in execute_graph res = graph_execution.execute(retval) File "/Users/wenjun/Code/mars/mars/executor.py", line 574, in execute future.result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/Users/wenjun/miniconda3/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/Users/wenjun/Code/mars/mars/tests/core.py", line 591, in _execute_operand super()._execute_operand(op) File "/Users/wenjun/Code/mars/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/Users/wenjun/Code/mars/mars/executor.py", line 446, in _execute_operand self.handle_op(first_op, results, self._mock) File "/Users/wenjun/Code/mars/mars/executor.py", line 378, in handle_op return Executor.handle(*args, **kw) File "/Users/wenjun/Code/mars/mars/executor.py", line 644, in handle return runner(results, op) File "/Users/wenjun/Code/mars/mars/dataframe/groupby/apply.py", line 63, in execute assert len(applied.index) == 1 AssertionError
AssertionError
def to_pandas(self): data = getattr(self, "_data", None) sortorder = getattr(self, "_sortorder", None) if data is None: return pd.MultiIndex.from_arrays( [np.array([], dtype=dtype) for dtype in self._dtypes], sortorder=sortorder, names=self._names, ) return pd.MultiIndex.from_tuples( [tuple(d) for d in data], sortorder=sortorder, names=self._names )
def to_pandas(self): data = getattr(self, "_data", None) if data is None: sortorder = getattr(self, "_sortorder", None) return pd.MultiIndex.from_arrays( [np.array([], dtype=dtype) for dtype in self._dtypes], sortorder=sortorder, names=self._names, ) return pd.MultiIndex.from_tuples( [tuple(d) for d in data], sortorder=self._sortorder, names=self._names )
https://github.com/mars-project/mars/issues/1542
In [1]: from mars.session import new_session In [2]: import mars.dataframe as md In [3]: new_session(backend='ray').as_default() 2020-09-01 20:05:51,291 INFO resource_spec.py:231 -- Starting Ray with 5.08 GiB memory available for workers and up to 2.56 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>). 2020-09-01 20:05:51,883 INFO services.py:1193 -- View the Ray dashboard at localhost:8265 Out[3]: <mars.session.Session at 0x7fc51364fb50> In [4]: df = md.read_csv('Downloads/ratings.csv') In [5]: df.groupby('userId').agg({'rating': ['min', 'max', 'mean', 'std']}).exec ...: ute() --------------------------------------------------------------------------- RayTaskError(TypeError) Traceback (most recent call last) <ipython-input-5-180cc92d1395> in <module> ----> 1 df.groupby('userId').agg({'rating': ['min', 'max', 'mean', 'std']}).execute() ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 576 577 def execute(self, session=None, **kw): --> 578 self._data.execute(session, **kw) 579 return self 580 ~/Workspace/mars/mars/core.py in execute(self, session, **kw) 364 365 # no more fetch, thus just fire run --> 366 session.run(self, **kw) 367 # return Tileable or ExecutableTuple itself 368 return self ~/Workspace/mars/mars/session.py in run(self, *tileables, **kw) 478 tileables = tuple(mt.tensor(t) if not isinstance(t, (Entity, Base)) else t 479 for t in tileables) --> 480 result = self._sess.run(*tileables, **kw) 481 482 for t in tileables: ~/Workspace/mars/mars/ray/core.py in run(self, *tileables, **kw) 188 if 'n_parallel' not in kw: # pragma: no cover 189 kw['n_parallel'] = ray.cluster_resources()['CPU'] --> 190 return self._executor.execute_tileables(tileables, **kw) 191 192 def __enter__(self): ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in execute_tileables(self, tileables, fetch, n_parallel, n_thread, print_progress, mock, compose, name) 879 n_parallel=n_parallel or n_thread, 880 print_progress=print_progress, mock=mock, --> 881 chunk_result=chunk_result) 882 883 # update shape of tileable and its chunks whatever it's successful or not ~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, no_intermediate, compose, retval, chunk_result) 691 print_progress=print_progress, mock=mock, mock_max_memory=self._mock_max_memory, 692 fetch_keys=fetch_keys, no_intermediate=no_intermediate) --> 693 res = graph_execution.execute(retval) 694 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory) 695 if mock: ~/Workspace/mars/mars/executor.py in execute(self, retval) 572 # wait until all the futures completed 573 for future in executed_futures: --> 574 future.result() 575 576 if retval: ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout) 426 raise CancelledError() 427 elif self._state == FINISHED: --> 428 return self.__get_result() 429 430 self._condition.wait(timeout) ~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self) 382 def __get_result(self): 383 if self._exception: --> 384 raise self._exception 385 else: 386 return self._result ~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self) 55 56 try: ---> 57 result = self.fn(*self.args, **self.kwargs) 58 except BaseException as exc: 59 self.future.set_exception(exc) ~/Workspace/mars/mars/utils.py in _inner(*args, **kwargs) 437 def _inner(*args, **kwargs): 438 with self: --> 439 return func(*args, **kwargs) 440 441 return _inner ~/Workspace/mars/mars/executor.py in _execute_operand(self, op) 444 # so we pass the first operand's first output to Executor.handle 445 first_op = ops[0] --> 446 self.handle_op(first_op, results, self._mock) 447 448 # update maximal memory usage during execution ~/Workspace/mars/mars/ray/core.py in handle_op(self, *args, **kw) 66 class GraphExecutionForRay(GraphExecution): 67 def handle_op(self, *args, **kw): ---> 68 return RayExecutor.handle(*args, **kw) 69 70 ~/Workspace/mars/mars/ray/core.py in handle(cls, op, results, mock) 147 148 try: --> 149 return ray.get(build_remote_funtion(runner).remote(results, op)) 150 except NotImplementedError: 151 for op_cls in mapper.keys(): ~/miniconda3/lib/python3.7/site-packages/ray/worker.py in get(object_refs, timeout) 1536 worker.core_worker.dump_object_store_memory_usage() 1537 if isinstance(value, RayTaskError): -> 1538 raise value.as_instanceof_cause() 1539 else: 1540 raise value RayTaskError(TypeError): ray::mars.ray.core.remote_runner() (pid=31351, ip=30.225.12.80) File "python/ray/_raylet.pyx", line 479, in ray._raylet.execute_task File "/Users/qinxuye/Workspace/mars/mars/ray/core.py", line 144, in remote_runner return func(results, op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/datasource/read_csv.py", line 322, in execute df = cls._cudf_read_csv(op) if op.gpu else cls._pandas_read_csv(f, op) File "/Users/qinxuye/Workspace/mars/mars/dataframe/datasource/read_csv.py", line 273, in _pandas_read_csv dtype=dtypes.to_dict(), nrows=op.nrows, **csv_kwargs) TypeError: parser_f() got an unexpected keyword argument 'outputs_ref'
TypeError