after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _upload(self, from_file, to_info, **_kwargs):
bucket = self.gs.bucket(to_info.bucket)
_upload_to_bucket(bucket, from_file)
|
def _upload(self, from_file, to_info, **_kwargs):
bucket = self.gs.bucket(to_info.bucket)
blob = bucket.blob(to_info.path)
blob.upload_from_filename(from_file)
|
https://github.com/iterative/dvc/issues/2572
|
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Preparing to upload data to 'gs://dse/dvc/dataset_20191004'
DEBUG: Preparing to collect status from gs://dse/dvc/dataset_20191004
DEBUG: Collecting information from local cache...
DEBUG: Path .dvc/cache/a7/a404b4826cca3b01dd5d8e6326de3e inode 7438277
DEBUG: SELECT mtime, size, md5, timestamp from state WHERE inode=?
DEBUG: fetched: [('1570168433714557952', '54785677', 'a7a404b4826cca3b01dd5d8e6326de3e', '1570174167312645120')]
DEBUG: UPDATE state SET timestamp = ? WHERE inode = ?
DEBUG: cache '.dvc/cache/a7/a404b4826cca3b01dd5d8e6326de3e' expected 'a7a404b4826cca3b01dd5d8e6326de3e' actual 'a7a404b4826cca3b01dd5d8e6326de3e'
DEBUG: Collecting information from remote cache...
DEBUG: Uploading '.dvc/cache/a7/a404b4826cca3b01dd5d8e6326de3e' to 'gs://dse/dvc/dataset_20191004/a7/a404b4826cca3b01dd5d8e6326de3e'
ERROR: failed to upload '.dvc/cache/a7/a404b4826cca3b01dd5d8e6326de3e' to 'gs://dse/dvc/dataset_20191004/a7/a404b4826cca3b01dd5d8e6326de3e' - ('Connection aborted.', timeout('The write operation timed out',))
Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help!
DEBUG: SELECT count from state_info WHERE rowid=?
DEBUG: fetched: [(2,)]
DEBUG: UPDATE state_info SET count = ? WHERE rowid = ?
ERROR: failed to push data to the cloud - 1 files failed to upload
------------------------------------------------------------
Traceback (most recent call last):
File "dvc/command/data_sync.py", line 50, in run
File "dvc/repo/__init__.py", line 33, in wrapper
File "dvc/repo/push.py", line 28, in push
File "dvc/data_cloud.py", line 63, in push
File "dvc/remote/local/__init__.py", line 403, in push
File "dvc/remote/local/__init__.py", line 393, in _process
dvc.exceptions.UploadError: 1 files failed to upload
|
dvc.exceptions.UploadError
|
def _reproduce(self, target, **kwargs):
import networkx as nx
from dvc.stage import Stage
stage = Stage.load(self, target)
G = self.graph()[1]
stages = nx.get_node_attributes(G, "stage")
node = relpath(stage.path, self.root_dir)
return _reproduce_stages(G, stages, node, **kwargs)
|
def _reproduce(self, target, single_item=False, **kwargs):
import networkx as nx
from dvc.stage import Stage
stage = Stage.load(self, target)
G = self.graph()[1]
stages = nx.get_node_attributes(G, "stage")
node = relpath(stage.path, self.root_dir)
if single_item:
ret = _reproduce_stage(stages, node, **kwargs)
else:
ret = _reproduce_stages(G, stages, node, **kwargs)
return ret
|
https://github.com/iterative/dvc/issues/2354
|
(base) ➜ dvc-example git:(master) ✗ dvc repro -sf test.txt.dvc --verbose
DEBUG: Trying to spawn '['/Users/prihodad/anaconda3/bin/python', '-m', 'dvc', 'daemon', '-q', 'updater']'
DEBUG: Spawned '['/Users/prihodad/anaconda3/bin/python', '-m', 'dvc', 'daemon', '-q', 'updater']'
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
Reproducing 'test.txt.dvc'
DEBUG: SELECT count from state_info WHERE rowid=?
DEBUG: fetched: [(8,)]
DEBUG: UPDATE state_info SET count = ? WHERE rowid = ?
ERROR: unexpected error - run() got an unexpected keyword argument 'ignore_build_cache'
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/main.py", line 40, in main
ret = cmd.run_cmd()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/base.py", line 63, in run_cmd
return self.run()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/repro.py", line 42, in run
recursive=self.args.recursive,
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/reproduce.py", line 80, in reproduce
stages = _reproduce(self, target, **kwargs)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/reproduce.py", line 96, in _reproduce
ret = _reproduce_stage(stages, node, **kwargs)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/reproduce.py", line 22, in _reproduce_stage
stage = stage.reproduce(**kwargs)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 331, in reproduce
self.run(**kwargs)
TypeError: run() got an unexpected keyword argument 'ignore_build_cache'
------------------------------------------------------------
|
TypeError
|
def _reproduce_stages(
G,
stages,
node,
downstream=False,
ignore_build_cache=False,
single_item=False,
**kwargs,
):
r"""Derive the evaluation of the given node for the given graph.
When you _reproduce a stage_, you want to _evaluate the descendants_
to know if it make sense to _recompute_ it. A post-ordered search
will give us an order list of the nodes we want.
For example, let's say that we have the following pipeline:
E
/ \
D F
/ \ \
B C G
\ /
A
The derived evaluation of D would be: [A, B, C, D]
In case that `downstream` option is specifed, the desired effect
is to derive the evaluation starting from the given stage up to the
ancestors. However, the `networkx.ancestors` returns a set, without
any guarantee of any order, so we are going to reverse the graph and
use a pre-ordered search using the given stage as a starting point.
E A
/ \ / \
D F B C G
/ \ \ --- reverse --> \ / /
B C G D F
\ / \ /
A E
The derived evaluation of _downstream_ B would be: [B, D, E]
"""
import networkx as nx
if single_item:
pipeline = [node]
elif downstream:
# NOTE (py3 only):
# Python's `deepcopy` defaults to pickle/unpickle the object.
# Stages are complex objects (with references to `repo`, `outs`,
# and `deps`) that cause struggles when you try to serialize them.
# We need to create a copy of the graph itself, and then reverse it,
# instead of using graph.reverse() directly because it calls
# `deepcopy` underneath -- unless copy=False is specified.
pipeline = nx.dfs_preorder_nodes(G.copy().reverse(copy=False), node)
else:
pipeline = nx.dfs_postorder_nodes(G, node)
result = []
for n in pipeline:
try:
ret = _reproduce_stage(stages, n, **kwargs)
if len(ret) != 0 and ignore_build_cache:
# NOTE: we are walking our pipeline from the top to the
# bottom. If one stage is changed, it will be reproduced,
# which tells us that we should force reproducing all of
# the other stages down below, even if their direct
# dependencies didn't change.
kwargs["force"] = True
result += ret
except Exception as ex:
raise ReproductionError(stages[n].relpath, ex)
return result
|
def _reproduce_stages(
G, stages, node, downstream=False, ignore_build_cache=False, **kwargs
):
r"""Derive the evaluation of the given node for the given graph.
When you _reproduce a stage_, you want to _evaluate the descendants_
to know if it make sense to _recompute_ it. A post-ordered search
will give us an order list of the nodes we want.
For example, let's say that we have the following pipeline:
E
/ \
D F
/ \ \
B C G
\ /
A
The derived evaluation of D would be: [A, B, C, D]
In case that `downstream` option is specifed, the desired effect
is to derive the evaluation starting from the given stage up to the
ancestors. However, the `networkx.ancestors` returns a set, without
any guarantee of any order, so we are going to reverse the graph and
use a pre-ordered search using the given stage as a starting point.
E A
/ \ / \
D F B C G
/ \ \ --- reverse --> \ / /
B C G D F
\ / \ /
A E
The derived evaluation of _downstream_ B would be: [B, D, E]
"""
import networkx as nx
if downstream:
# NOTE (py3 only):
# Python's `deepcopy` defaults to pickle/unpickle the object.
# Stages are complex objects (with references to `repo`, `outs`,
# and `deps`) that cause struggles when you try to serialize them.
# We need to create a copy of the graph itself, and then reverse it,
# instead of using graph.reverse() directly because it calls
# `deepcopy` underneath -- unless copy=False is specified.
pipeline = nx.dfs_preorder_nodes(G.copy().reverse(copy=False), node)
else:
pipeline = nx.dfs_postorder_nodes(G, node)
result = []
for n in pipeline:
try:
ret = _reproduce_stage(stages, n, **kwargs)
if len(ret) != 0 and ignore_build_cache:
# NOTE: we are walking our pipeline from the top to the
# bottom. If one stage is changed, it will be reproduced,
# which tells us that we should force reproducing all of
# the other stages down below, even if their direct
# dependencies didn't change.
kwargs["force"] = True
result += ret
except Exception as ex:
raise ReproductionError(stages[n].relpath, ex)
return result
|
https://github.com/iterative/dvc/issues/2354
|
(base) ➜ dvc-example git:(master) ✗ dvc repro -sf test.txt.dvc --verbose
DEBUG: Trying to spawn '['/Users/prihodad/anaconda3/bin/python', '-m', 'dvc', 'daemon', '-q', 'updater']'
DEBUG: Spawned '['/Users/prihodad/anaconda3/bin/python', '-m', 'dvc', 'daemon', '-q', 'updater']'
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
Reproducing 'test.txt.dvc'
DEBUG: SELECT count from state_info WHERE rowid=?
DEBUG: fetched: [(8,)]
DEBUG: UPDATE state_info SET count = ? WHERE rowid = ?
ERROR: unexpected error - run() got an unexpected keyword argument 'ignore_build_cache'
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/main.py", line 40, in main
ret = cmd.run_cmd()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/base.py", line 63, in run_cmd
return self.run()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/repro.py", line 42, in run
recursive=self.args.recursive,
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/reproduce.py", line 80, in reproduce
stages = _reproduce(self, target, **kwargs)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/reproduce.py", line 96, in _reproduce
ret = _reproduce_stage(stages, node, **kwargs)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/reproduce.py", line 22, in _reproduce_stage
stage = stage.reproduce(**kwargs)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 331, in reproduce
self.run(**kwargs)
TypeError: run() got an unexpected keyword argument 'ignore_build_cache'
------------------------------------------------------------
|
TypeError
|
def __getattr__(self, name):
# When deepcopy is called, it creates and object without __init__,
# self.parsed is not initialized and it causes infinite recursion.
# More on this special casing here:
# https://stackoverflow.com/a/47300262/298182
if name.startswith("__"):
raise AttributeError(name)
return getattr(self.parsed, name)
|
def __getattr__(self, name):
return getattr(self.parsed, name)
|
https://github.com/iterative/dvc/issues/2259
|
---------------------------------------------------------------------------
RecursionError Traceback (most recent call last)
<ipython-input-7-90086e6e0e64> in <module>
----> 1 r.graph()[0].reverse()
~/miniconda3/envs/py36_v11/lib/python3.6/site-packages/networkx/classes/digraph.py in reverse(self, copy)
1195 H = self.__class__()
1196 H.graph.update(deepcopy(self.graph))
-> 1197 H.add_nodes_from((n, deepcopy(d)) for n, d in self.node.items())
1198 H.add_edges_from((v, u, deepcopy(d)) for u, v, d
1199 in self.edges(data=True))
~/miniconda3/envs/py36_v11/lib/python3.6/site-packages/networkx/classes/digraph.py in add_nodes_from(self, nodes_for_adding, **attr)
470
471 """
--> 472 for n in nodes_for_adding:
473 # keep all this inside try/except because
474 # CPython throws TypeError on n not in self._succ,
~/miniconda3/envs/py36_v11/lib/python3.6/site-packages/networkx/classes/digraph.py in <genexpr>(.0)
1195 H = self.__class__()
1196 H.graph.update(deepcopy(self.graph))
-> 1197 H.add_nodes_from((n, deepcopy(d)) for n, d in self.node.items())
1198 H.add_edges_from((v, u, deepcopy(d)) for u, v, d
1199 in self.edges(data=True))
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
238 memo[id(x)] = y
239 for key, value in x.items():
--> 240 y[deepcopy(key, memo)] = deepcopy(value, memo)
241 return y
242 d[dict] = _deepcopy_dict
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
178 y = x
179 else:
--> 180 y = _reconstruct(x, memo, *rv)
181
182 # If is its own copy, don't memoize.
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
278 if state is not None:
279 if deep:
--> 280 state = deepcopy(state, memo)
281 if hasattr(y, '__setstate__'):
282 y.__setstate__(state)
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
238 memo[id(x)] = y
239 for key, value in x.items():
--> 240 y[deepcopy(key, memo)] = deepcopy(value, memo)
241 return y
242 d[dict] = _deepcopy_dict
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
178 y = x
179 else:
--> 180 y = _reconstruct(x, memo, *rv)
181
182 # If is its own copy, don't memoize.
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
278 if state is not None:
279 if deep:
--> 280 state = deepcopy(state, memo)
281 if hasattr(y, '__setstate__'):
282 y.__setstate__(state)
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
238 memo[id(x)] = y
239 for key, value in x.items():
--> 240 y[deepcopy(key, memo)] = deepcopy(value, memo)
241 return y
242 d[dict] = _deepcopy_dict
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
178 y = x
179 else:
--> 180 y = _reconstruct(x, memo, *rv)
181
182 # If is its own copy, don't memoize.
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
278 if state is not None:
279 if deep:
--> 280 state = deepcopy(state, memo)
281 if hasattr(y, '__setstate__'):
282 y.__setstate__(state)
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
238 memo[id(x)] = y
239 for key, value in x.items():
--> 240 y[deepcopy(key, memo)] = deepcopy(value, memo)
241 return y
242 d[dict] = _deepcopy_dict
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
178 y = x
179 else:
--> 180 y = _reconstruct(x, memo, *rv)
181
182 # If is its own copy, don't memoize.
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
278 if state is not None:
279 if deep:
--> 280 state = deepcopy(state, memo)
281 if hasattr(y, '__setstate__'):
282 y.__setstate__(state)
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _deepcopy_dict(x, memo, deepcopy)
238 memo[id(x)] = y
239 for key, value in x.items():
--> 240 y[deepcopy(key, memo)] = deepcopy(value, memo)
241 return y
242 d[dict] = _deepcopy_dict
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
178 y = x
179 else:
--> 180 y = _reconstruct(x, memo, *rv)
181
182 # If is its own copy, don't memoize.
~/miniconda3/envs/py36_v11/lib/python3.6/copy.py in _reconstruct(x, memo, func, args, state, listiter, dictiter, deepcopy)
279 if deep:
280 state = deepcopy(state, memo)
--> 281 if hasattr(y, '__setstate__'):
282 y.__setstate__(state)
283 else:
~/miniconda3/envs/py36_v11/lib/python3.6/site-packages/dvc/path_info.py in __getattr__(self, name)
185
186 def __getattr__(self, name):
--> 187 return getattr(self.parsed, name)
188
189 @cached_property
... last 1 frames repeated, from the frame below ...
~/miniconda3/envs/py36_v11/lib/python3.6/site-packages/dvc/path_info.py in __getattr__(self, name)
185
186 def __getattr__(self, name):
--> 187 return getattr(self.parsed, name)
188
189 @cached_property
RecursionError: maximum recursion depth exceeded
|
RecursionError
|
def makedirs(self, path):
self._sftp_connect()
# Single stat call will say whether this is a dir, a file or a link
st_mode = self.st_mode(path)
if stat.S_ISDIR(st_mode):
return
if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):
raise DvcException("a file with the same name '{}' already exists".format(path))
head, tail = posixpath.split(path)
if head:
self.makedirs(head)
if tail:
try:
self._sftp.mkdir(path)
except IOError as e:
# Since paramiko errors are very vague we need to recheck
# whether it's because path already exists or something else
if e.errno == errno.EACCES or not self.exists(path):
raise
|
def makedirs(self, path):
self._sftp_connect()
if self.isdir(path):
return
if self.isfile(path) or self.islink(path):
raise DvcException("a file with the same name '{}' already exists".format(path))
head, tail = posixpath.split(path)
if head:
self.makedirs(head)
if tail:
self._sftp.mkdir(path)
|
https://github.com/iterative/dvc/issues/1862
|
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(33301,)]
DEBUG: UPDATE state_info SET count = 33301 WHERE rowid = 1
DEBUG: Path /home/rob/fossid/dvc/autoid-dvc/.dvc/cache inode 6554341
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (6554341, "44515650", "1554877442274173184", "1554879861734038784", "")
ERROR: failed to push data to the cloud - 'host'
------------------------------------------------------------
Traceback (most recent call last):
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/remote/ssh/__init__.py", line 187, in upload
from_info["path"], to_info["path"], progress_title=name
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/remote/ssh/connection.py", line 189, in upload
self.makedirs(posixpath.dirname(dest))
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/remote/ssh/connection.py", line 127, in makedirs
self._sftp.mkdir(path)
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/paramiko/sftp_client.py", line 460, in mkdir
self._request(CMD_MKDIR, path, attr)
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/paramiko/sftp_client.py", line 813, in _request
return self._read_response(num)
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/paramiko/sftp_client.py", line 865, in _read_response
self._convert_status(msg)
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/paramiko/sftp_client.py", line 898, in _convert_status
raise IOError(text)
OSError: Failure
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/command/data_sync.py", line 66, in do_run
recursive=self.args.recursive,
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/repo/push.py", line 27, in push
used, jobs, remote=remote, show_checksums=show_checksums
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/data_cloud.py", line 132, in push
show_checksums=show_checksums,
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/remote/local.py", line 713, in push
download=False,
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/remote/local.py", line 703, in _process
f.result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 425, in result
return self.__get_result()
File "/usr/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/usr/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/rob/fossid/dvc/.venv/lib/python3.6/site-packages/dvc/remote/ssh/__init__.py", line 192, in upload
host=from_info["host"],
KeyError: 'host'
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
DEBUG: Analytics is enabled.
DEBUG: Trying to spawn '['/home/rob/fossid/dvc/.venv/bin/python3', '-m', 'dvc', 'daemon', '-q', 'analytics', '/tmp/tmplvf38zmb']'
DEBUG: Spawned '['/home/rob/fossid/dvc/.venv/bin/python3', '-m', 'dvc', 'daemon', '-q', 'analytics', '/tmp/tmplvf38zmb']'
|
OSError
|
def _show(self, target, commands, outs, locked):
import networkx
from dvc.stage import Stage
stage = Stage.load(self.repo, target)
G = self.repo.graph()[0]
stages = networkx.get_node_attributes(G, "stage")
node = relpath(stage.path, self.repo.root_dir)
nodes = networkx.dfs_postorder_nodes(G, node)
if locked:
nodes = [n for n in nodes if stages[n].locked]
for n in nodes:
if commands:
logger.info(stages[n].cmd)
elif outs:
for out in stages[n].outs:
logger.info(str(out))
else:
logger.info(n)
|
def _show(self, target, commands, outs, locked):
import networkx
from dvc.stage import Stage
stage = Stage.load(self.repo, target)
G = self.repo.graph()[0]
stages = networkx.get_node_attributes(G, "stage")
node = os.path.relpath(stage.path, self.repo.root_dir)
nodes = networkx.dfs_postorder_nodes(G, node)
if locked:
nodes = [n for n in nodes if stages[n].locked]
for n in nodes:
if commands:
logger.info(stages[n].cmd)
elif outs:
for out in stages[n].outs:
logger.info(str(out))
else:
logger.info(n)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def __build_graph(self, target, commands, outs):
import networkx
from dvc.stage import Stage
stage = Stage.load(self.repo, target)
node = relpath(stage.path, self.repo.root_dir)
pipelines = list(filter(lambda g: node in g.nodes(), self.repo.pipelines()))
assert len(pipelines) == 1
G = pipelines[0]
stages = networkx.get_node_attributes(G, "stage")
nodes = []
for n in G.nodes():
stage = stages[n]
if commands:
if stage.cmd is None:
continue
nodes.append(stage.cmd)
elif outs:
for out in stage.outs:
nodes.append(str(out))
else:
nodes.append(stage.relpath)
edges = []
for e in G.edges():
from_stage = stages[e[0]]
to_stage = stages[e[1]]
if commands:
if to_stage.cmd is None:
continue
edges.append((from_stage.cmd, to_stage.cmd))
elif outs:
for from_out in from_stage.outs:
for to_out in to_stage.outs:
edges.append((str(from_out), str(to_out)))
else:
edges.append((from_stage.relpath, to_stage.relpath))
return nodes, edges, networkx.is_tree(G)
|
def __build_graph(self, target, commands, outs):
import networkx
from dvc.stage import Stage
stage = Stage.load(self.repo, target)
node = os.path.relpath(stage.path, self.repo.root_dir)
pipelines = list(filter(lambda g: node in g.nodes(), self.repo.pipelines()))
assert len(pipelines) == 1
G = pipelines[0]
stages = networkx.get_node_attributes(G, "stage")
nodes = []
for n in G.nodes():
stage = stages[n]
if commands:
if stage.cmd is None:
continue
nodes.append(stage.cmd)
elif outs:
for out in stage.outs:
nodes.append(str(out))
else:
nodes.append(stage.relpath)
edges = []
for e in G.edges():
from_stage = stages[e[0]]
to_stage = stages[e[1]]
if commands:
if to_stage.cmd is None:
continue
edges.append((from_stage.cmd, to_stage.cmd))
elif outs:
for from_out in from_stage.outs:
for to_out in to_stage.outs:
edges.append((str(from_out), str(to_out)))
else:
edges.append((from_stage.relpath, to_stage.relpath))
return nodes, edges, networkx.is_tree(G)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def run(self):
logger.info(relpath(Repo.find_root()))
return 0
|
def run(self):
logger.info(os.path.relpath(Repo.find_root()))
return 0
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def __init__(self, output):
super(OutputNotFoundError, self).__init__(
"unable to find stage file with output '{path}'".format(path=relpath(output))
)
|
def __init__(self, output):
super(OutputNotFoundError, self).__init__(
"unable to find stage file with output '{path}'".format(
path=os.path.relpath(output)
)
)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def __init__(self, path):
super(BadMetricError, self).__init__(
"'{}' does not exist, not a metric or is malformed".format(relpath(path))
)
|
def __init__(self, path):
super(BadMetricError, self).__init__(
"'{}' does not exist, not a metric or is malformed".format(
os.path.relpath(path)
)
)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def __init__(self, path, cause=None):
path = relpath(path)
super(StageFileCorruptedError, self).__init__(
"unable to read stage file: {} YAML file structure is corrupted".format(path),
cause=cause,
)
|
def __init__(self, path, cause=None):
path = os.path.relpath(path)
super(StageFileCorruptedError, self).__init__(
"unable to read stage file: {} YAML file structure is corrupted".format(path),
cause=cause,
)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def get_match(self, abs_path):
relative_path = relpath(abs_path, self.dirname)
if os.name == "nt":
relative_path = relative_path.replace("\\", "/")
relative_path = cast_bytes(relative_path, "utf-8")
for pattern in self.patterns:
if match_pattern(relative_path, pattern) and self._no_negate_pattern_matches(
relative_path
):
return (abs_path, pattern, self.ignore_file_path)
return None
|
def get_match(self, abs_path):
rel_path = os.path.relpath(abs_path, self.dirname)
if os.name == "nt":
rel_path = rel_path.replace("\\", "/")
rel_path = cast_bytes(rel_path, "utf-8")
for pattern in self.patterns:
if match_pattern(rel_path, pattern) and self._no_negate_pattern_matches(
rel_path
):
return (abs_path, pattern, self.ignore_file_path)
return None
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def relpath(self, other):
return self.__class__(relpath(self, other))
|
def relpath(self, other):
return self.__class__(os.path.relpath(fspath_py35(self), fspath_py35(other)))
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _collect_dir(self, path_info):
dir_info = []
for root, dirs, files in self.walk(path_info):
if len(files) > LARGE_DIR_SIZE:
msg = "Computing md5 for a large directory {}. This is only done once."
title = str(self.path_cls(root))
logger.info(msg.format(title))
files = progress(files, name=title)
for fname in files:
file_info = self.path_cls(root) / fname
relative_path = file_info.relative_to(path_info)
dir_info.append(
{
# NOTE: this is lossy transformation:
# "hey\there" -> "hey/there"
# "hey/there" -> "hey/there"
# The latter is fine filename on Windows,
# which will transform to dir/file on back transform.
#
# Yes, this is a BUG, as long as we permit "/" in
# filenames on Windows and "\" on Unix
self.PARAM_RELPATH: relative_path.as_posix(),
self.PARAM_CHECKSUM: self.get_file_checksum(file_info),
}
)
# NOTE: sorting the list by path to ensure reproducibility
return sorted(dir_info, key=itemgetter(self.PARAM_RELPATH))
|
def _collect_dir(self, path_info):
dir_info = []
for root, dirs, files in self.walk(path_info):
if len(files) > LARGE_DIR_SIZE:
msg = "Computing md5 for a large directory {}. This is only done once."
title = str(self.path_cls(root))
logger.info(msg.format(title))
files = progress(files, name=title)
for fname in files:
file_info = self.path_cls(root) / fname
relpath = file_info.relative_to(path_info)
dir_info.append(
{
# NOTE: this is lossy transformation:
# "hey\there" -> "hey/there"
# "hey/there" -> "hey/there"
# The latter is fine filename on Windows,
# which will transform to dir/file on back transform.
#
# Yes, this is a BUG, as long as we permit "/" in
# filenames on Windows and "\" on Unix
self.PARAM_RELPATH: relpath.as_posix(),
self.PARAM_CHECKSUM: self.get_file_checksum(file_info),
}
)
# NOTE: sorting the list by path to ensure reproducibility
return sorted(dir_info, key=itemgetter(self.PARAM_RELPATH))
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def load_dir_cache(self, checksum):
path_info = self.checksum_to_path_info(checksum)
fobj = tempfile.NamedTemporaryFile(delete=False)
path = fobj.name
to_info = self.path_cls(path)
self.cache.download([path_info], [to_info], no_progress_bar=True)
try:
with open(path, "r") as fobj:
d = json.load(fobj)
except ValueError:
logger.exception("Failed to load dir cache '{}'".format(path_info))
return []
finally:
os.unlink(path)
if not isinstance(d, list):
msg = "dir cache file format error '{}' [skipping the file]"
logger.error(msg.format(relpath(path)))
return []
for info in d:
# NOTE: here is a BUG, see comment to .as_posix() below
relative_path = self.path_cls.from_posix(info[self.PARAM_RELPATH])
info[self.PARAM_RELPATH] = relative_path.fspath
return d
|
def load_dir_cache(self, checksum):
path_info = self.checksum_to_path_info(checksum)
fobj = tempfile.NamedTemporaryFile(delete=False)
path = fobj.name
to_info = self.path_cls(path)
self.cache.download([path_info], [to_info], no_progress_bar=True)
try:
with open(path, "r") as fobj:
d = json.load(fobj)
except ValueError:
logger.exception("Failed to load dir cache '{}'".format(path_info))
return []
finally:
os.unlink(path)
if not isinstance(d, list):
msg = "dir cache file format error '{}' [skipping the file]"
logger.error(msg.format(os.path.relpath(path)))
return []
for info in d:
# NOTE: here is a BUG, see comment to .as_posix() below
relpath = self.path_cls.from_posix(info[self.PARAM_RELPATH])
info[self.PARAM_RELPATH] = relpath.fspath
return d
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _checkout_dir(self, path_info, checksum, force, progress_callback=None):
# Create dir separately so that dir is created
# even if there are no files in it
if not self.exists(path_info):
self.makedirs(path_info)
dir_info = self.get_dir_cache(checksum)
logger.debug("Linking directory '{}'.".format(path_info))
for entry in dir_info:
relative_path = entry[self.PARAM_RELPATH]
entry_checksum = entry[self.PARAM_CHECKSUM]
entry_cache_info = self.checksum_to_path_info(entry_checksum)
entry_info = path_info / relative_path
entry_checksum_info = {self.PARAM_CHECKSUM: entry_checksum}
if self.changed(entry_info, entry_checksum_info):
if self.exists(entry_info):
self.safe_remove(entry_info, force=force)
self.link(entry_cache_info, entry_info)
self.state.save(entry_info, entry_checksum)
if progress_callback:
progress_callback.update(str(entry_info))
self._remove_redundant_files(path_info, dir_info, force)
self.state.save_link(path_info)
self.state.save(path_info, checksum)
|
def _checkout_dir(self, path_info, checksum, force, progress_callback=None):
# Create dir separately so that dir is created
# even if there are no files in it
if not self.exists(path_info):
self.makedirs(path_info)
dir_info = self.get_dir_cache(checksum)
logger.debug("Linking directory '{}'.".format(path_info))
for entry in dir_info:
relpath = entry[self.PARAM_RELPATH]
entry_checksum = entry[self.PARAM_CHECKSUM]
entry_cache_info = self.checksum_to_path_info(entry_checksum)
entry_info = path_info / relpath
entry_checksum_info = {self.PARAM_CHECKSUM: entry_checksum}
if self.changed(entry_info, entry_checksum_info):
if self.exists(entry_info):
self.safe_remove(entry_info, force=force)
self.link(entry_cache_info, entry_info)
self.state.save(entry_info, entry_checksum)
if progress_callback:
progress_callback.update(str(entry_info))
self._remove_redundant_files(path_info, dir_info, force)
self.state.save_link(path_info)
self.state.save(path_info, checksum)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def resolve_path(path, config_file):
"""Resolve path relative to config file location.
Args:
path: Path to be resolved.
config_file: Path to config file, which `path` is specified
relative to.
Returns:
Path relative to the `config_file` location. If `path` is an
absolute path then it will be returned without change.
"""
if os.path.isabs(path):
return path
return relpath(path, os.path.dirname(config_file))
|
def resolve_path(path, config_file):
"""Resolve path relative to config file location.
Args:
path: Path to be resolved.
config_file: Path to config file, which `path` is specified
relative to.
Returns:
Path relative to the `config_file` location. If `path` is an
absolute path then it will be returned without change.
"""
if os.path.isabs(path):
return path
return os.path.relpath(path, os.path.dirname(config_file))
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _unprotect_file(path):
if System.is_symlink(path) or System.is_hardlink(path):
logger.debug("Unprotecting '{}'".format(path))
tmp = os.path.join(os.path.dirname(path), "." + str(uuid.uuid4()))
# The operations order is important here - if some application
# would access the file during the process of copyfile then it
# would get only the part of file. So, at first, the file should be
# copied with the temporary name, and then original file should be
# replaced by new.
copyfile(path, tmp, name="Unprotecting '{}'".format(relpath(path)))
remove(path)
os.rename(tmp, path)
else:
logger.debug(
"Skipping copying for '{}', since it is not "
"a symlink or a hardlink.".format(path)
)
os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
|
def _unprotect_file(path):
if System.is_symlink(path) or System.is_hardlink(path):
logger.debug("Unprotecting '{}'".format(path))
tmp = os.path.join(os.path.dirname(path), "." + str(uuid.uuid4()))
# The operations order is important here - if some application
# would access the file during the process of copyfile then it
# would get only the part of file. So, at first, the file should be
# copied with the temporary name, and then original file should be
# replaced by new.
copyfile(
path,
tmp,
name="Unprotecting '{}'".format(os.path.relpath(path)),
)
remove(path)
os.rename(tmp, path)
else:
logger.debug(
"Skipping copying for '{}', since it is not "
"a symlink or a hardlink.".format(path)
)
os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _create_unpacked_dir(self, checksum, dir_info, unpacked_dir_info):
self.makedirs(unpacked_dir_info)
for entry in progress(dir_info, name="Created unpacked dir"):
entry_cache_info = self.checksum_to_path_info(entry[self.PARAM_CHECKSUM])
relative_path = entry[self.PARAM_RELPATH]
self.link(entry_cache_info, unpacked_dir_info / relative_path, "hardlink")
self.state.save(unpacked_dir_info, checksum)
|
def _create_unpacked_dir(self, checksum, dir_info, unpacked_dir_info):
self.makedirs(unpacked_dir_info)
for entry in progress(dir_info, name="Created unpacked dir"):
entry_cache_info = self.checksum_to_path_info(entry[self.PARAM_CHECKSUM])
relpath = entry[self.PARAM_RELPATH]
self.link(entry_cache_info, unpacked_dir_info / relpath, "hardlink")
self.state.save(unpacked_dir_info, checksum)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def collect(self, target, with_deps=False, recursive=False):
import networkx as nx
from dvc.stage import Stage
if not target or (recursive and os.path.isdir(target)):
return self.active_stages(target)
stage = Stage.load(self, target)
if not with_deps:
return [stage]
node = relpath(stage.path, self.root_dir)
G = self._get_pipeline(node)
ret = []
for n in nx.dfs_postorder_nodes(G, node):
ret.append(G.node[n]["stage"])
return ret
|
def collect(self, target, with_deps=False, recursive=False):
import networkx as nx
from dvc.stage import Stage
if not target or (recursive and os.path.isdir(target)):
return self.active_stages(target)
stage = Stage.load(self, target)
if not with_deps:
return [stage]
node = os.path.relpath(stage.path, self.root_dir)
G = self._get_pipeline(node)
ret = []
for n in nx.dfs_postorder_nodes(G, node):
ret.append(G.node[n]["stage"])
return ret
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def graph(self, stages=None, from_directory=None):
"""Generate a graph by using the given stages on the given directory
The nodes of the graph are the stage's path relative to the root.
Edges are created when the output of one stage is used as a
dependency in other stage.
The direction of the edges goes from the stage to its dependency:
For example, running the following:
$ dvc run -o A "echo A > A"
$ dvc run -d A -o B "echo B > B"
$ dvc run -d B -o C "echo C > C"
Will create the following graph:
ancestors <--
|
C.dvc -> B.dvc -> A.dvc
| |
| --> descendants
|
------- pipeline ------>
|
v
(weakly connected components)
Args:
stages (list): used to build a graph, if None given, use the ones
on the `from_directory`.
from_directory (str): directory where to look at for stages, if
None is given, use the current working directory
Raises:
OutputDuplicationError: two outputs with the same path
StagePathAsOutputError: stage inside an output directory
OverlappingOutputPathsError: output inside output directory
CyclicGraphError: resulting graph has cycles
"""
import networkx as nx
from dvc.exceptions import (
OutputDuplicationError,
StagePathAsOutputError,
OverlappingOutputPathsError,
)
G = nx.DiGraph()
G_active = nx.DiGraph()
stages = stages or self.stages(from_directory, check_dag=False)
stages = [stage for stage in stages if stage]
outs = []
for stage in stages:
for out in stage.outs:
existing = []
for o in outs:
if o.path_info == out.path_info:
existing.append(o.stage)
in_o_dir = out.path_info.isin(o.path_info)
in_out_dir = o.path_info.isin(out.path_info)
if in_o_dir or in_out_dir:
raise OverlappingOutputPathsError(o, out)
if existing:
stages = [stage.relpath, existing[0].relpath]
raise OutputDuplicationError(str(out), stages)
outs.append(out)
for stage in stages:
stage_path_info = PathInfo(stage.path)
for out in outs:
if stage_path_info.isin(out.path_info):
raise StagePathAsOutputError(stage.wdir, stage.relpath)
for stage in stages:
node = relpath(stage.path, self.root_dir)
G.add_node(node, stage=stage)
G_active.add_node(node, stage=stage)
for dep in stage.deps:
for out in outs:
if (
out.path_info != dep.path_info
and not dep.path_info.isin(out.path_info)
and not out.path_info.isin(dep.path_info)
):
continue
dep_stage = out.stage
dep_node = relpath(dep_stage.path, self.root_dir)
G.add_node(dep_node, stage=dep_stage)
G.add_edge(node, dep_node)
if not stage.locked:
G_active.add_node(dep_node, stage=dep_stage)
G_active.add_edge(node, dep_node)
self._check_cyclic_graph(G)
return G, G_active
|
def graph(self, stages=None, from_directory=None):
"""Generate a graph by using the given stages on the given directory
The nodes of the graph are the stage's path relative to the root.
Edges are created when the output of one stage is used as a
dependency in other stage.
The direction of the edges goes from the stage to its dependency:
For example, running the following:
$ dvc run -o A "echo A > A"
$ dvc run -d A -o B "echo B > B"
$ dvc run -d B -o C "echo C > C"
Will create the following graph:
ancestors <--
|
C.dvc -> B.dvc -> A.dvc
| |
| --> descendants
|
------- pipeline ------>
|
v
(weakly connected components)
Args:
stages (list): used to build a graph, if None given, use the ones
on the `from_directory`.
from_directory (str): directory where to look at for stages, if
None is given, use the current working directory
Raises:
OutputDuplicationError: two outputs with the same path
StagePathAsOutputError: stage inside an output directory
OverlappingOutputPathsError: output inside output directory
CyclicGraphError: resulting graph has cycles
"""
import networkx as nx
from dvc.exceptions import (
OutputDuplicationError,
StagePathAsOutputError,
OverlappingOutputPathsError,
)
G = nx.DiGraph()
G_active = nx.DiGraph()
stages = stages or self.stages(from_directory, check_dag=False)
stages = [stage for stage in stages if stage]
outs = []
for stage in stages:
for out in stage.outs:
existing = []
for o in outs:
if o.path_info == out.path_info:
existing.append(o.stage)
in_o_dir = out.path_info.isin(o.path_info)
in_out_dir = o.path_info.isin(out.path_info)
if in_o_dir or in_out_dir:
raise OverlappingOutputPathsError(o, out)
if existing:
stages = [stage.relpath, existing[0].relpath]
raise OutputDuplicationError(str(out), stages)
outs.append(out)
for stage in stages:
stage_path_info = PathInfo(stage.path)
for out in outs:
if stage_path_info.isin(out.path_info):
raise StagePathAsOutputError(stage.wdir, stage.relpath)
for stage in stages:
node = os.path.relpath(stage.path, self.root_dir)
G.add_node(node, stage=stage)
G_active.add_node(node, stage=stage)
for dep in stage.deps:
for out in outs:
if (
out.path_info != dep.path_info
and not dep.path_info.isin(out.path_info)
and not out.path_info.isin(dep.path_info)
):
continue
dep_stage = out.stage
dep_node = os.path.relpath(dep_stage.path, self.root_dir)
G.add_node(dep_node, stage=dep_stage)
G.add_edge(node, dep_node)
if not stage.locked:
G_active.add_node(dep_node, stage=dep_stage)
G_active.add_edge(node, dep_node)
self._check_cyclic_graph(G)
return G, G_active
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def init(root_dir=os.curdir, no_scm=False, force=False):
"""
Creates an empty repo on the given directory -- basically a
`.dvc` directory with subdirectories for configuration and cache.
It should be tracked by a SCM or use the `--no-scm` flag.
If the given directory is not empty, you must use the `--force`
flag to override it.
Args:
root_dir: Path to repo's root directory.
Returns:
Repo instance.
Raises:
KeyError: Raises an exception.
"""
root_dir = os.path.realpath(root_dir)
dvc_dir = os.path.join(root_dir, Repo.DVC_DIR)
scm = SCM(root_dir)
if isinstance(scm, NoSCM) and not no_scm:
raise InitError(
"{repo} is not tracked by any supported scm tool (e.g. git). "
"Use '--no-scm' if you don't want to use any scm.".format(repo=root_dir)
)
if os.path.isdir(dvc_dir):
if not force:
raise InitError(
"'{repo}' exists. Use '-f' to force.".format(repo=relpath(dvc_dir))
)
shutil.rmtree(dvc_dir)
os.mkdir(dvc_dir)
config = Config.init(dvc_dir)
proj = Repo(root_dir)
scm.add([config.config_file])
if scm.ignore_file:
scm.add([os.path.join(dvc_dir, scm.ignore_file)])
logger.info("\nYou can now commit the changes to git.\n")
_welcome_message()
return proj
|
def init(root_dir=os.curdir, no_scm=False, force=False):
"""
Creates an empty repo on the given directory -- basically a
`.dvc` directory with subdirectories for configuration and cache.
It should be tracked by a SCM or use the `--no-scm` flag.
If the given directory is not empty, you must use the `--force`
flag to override it.
Args:
root_dir: Path to repo's root directory.
Returns:
Repo instance.
Raises:
KeyError: Raises an exception.
"""
root_dir = os.path.realpath(root_dir)
dvc_dir = os.path.join(root_dir, Repo.DVC_DIR)
scm = SCM(root_dir)
if isinstance(scm, NoSCM) and not no_scm:
raise InitError(
"{repo} is not tracked by any supported scm tool (e.g. git). "
"Use '--no-scm' if you don't want to use any scm.".format(repo=root_dir)
)
if os.path.isdir(dvc_dir):
if not force:
raise InitError(
"'{repo}' exists. Use '-f' to force.".format(
repo=os.path.relpath(dvc_dir)
)
)
shutil.rmtree(dvc_dir)
os.mkdir(dvc_dir)
config = Config.init(dvc_dir)
proj = Repo(root_dir)
scm.add([config.config_file])
if scm.ignore_file:
scm.add([os.path.join(dvc_dir, scm.ignore_file)])
logger.info("\nYou can now commit the changes to git.\n")
_welcome_message()
return proj
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def reproduce(
self,
target=None,
single_item=False,
force=False,
dry=False,
interactive=False,
pipeline=False,
all_pipelines=False,
ignore_build_cache=False,
no_commit=False,
downstream=False,
recursive=False,
):
import networkx as nx
from dvc.stage import Stage
if not target and not all_pipelines:
raise ValueError()
if not interactive:
config = self.config
core = config.config[config.SECTION_CORE]
interactive = core.get(config.SECTION_CORE_INTERACTIVE, False)
targets = []
if recursive and os.path.isdir(target):
G = self.graph(from_directory=target)[1]
dir_targets = [
os.path.join(self.root_dir, n) for n in nx.dfs_postorder_nodes(G)
]
targets.extend(dir_targets)
elif pipeline or all_pipelines:
if pipeline:
stage = Stage.load(self, target)
node = relpath(stage.path, self.root_dir)
pipelines = [self._get_pipeline(node)]
else:
pipelines = self.pipelines()
for G in pipelines:
for node in G.nodes():
if G.in_degree(node) == 0:
targets.append(os.path.join(self.root_dir, node))
else:
targets.append(target)
ret = []
with self.state:
for target in targets:
stages = _reproduce(
self,
target,
single_item=single_item,
force=force,
dry=dry,
interactive=interactive,
ignore_build_cache=ignore_build_cache,
no_commit=no_commit,
downstream=downstream,
)
ret.extend(stages)
return ret
|
def reproduce(
self,
target=None,
single_item=False,
force=False,
dry=False,
interactive=False,
pipeline=False,
all_pipelines=False,
ignore_build_cache=False,
no_commit=False,
downstream=False,
recursive=False,
):
import networkx as nx
from dvc.stage import Stage
if not target and not all_pipelines:
raise ValueError()
if not interactive:
config = self.config
core = config.config[config.SECTION_CORE]
interactive = core.get(config.SECTION_CORE_INTERACTIVE, False)
targets = []
if recursive and os.path.isdir(target):
G = self.graph(from_directory=target)[1]
dir_targets = [
os.path.join(self.root_dir, n) for n in nx.dfs_postorder_nodes(G)
]
targets.extend(dir_targets)
elif pipeline or all_pipelines:
if pipeline:
stage = Stage.load(self, target)
node = os.path.relpath(stage.path, self.root_dir)
pipelines = [self._get_pipeline(node)]
else:
pipelines = self.pipelines()
for G in pipelines:
for node in G.nodes():
if G.in_degree(node) == 0:
targets.append(os.path.join(self.root_dir, node))
else:
targets.append(target)
ret = []
with self.state:
for target in targets:
stages = _reproduce(
self,
target,
single_item=single_item,
force=force,
dry=dry,
interactive=interactive,
ignore_build_cache=ignore_build_cache,
no_commit=no_commit,
downstream=downstream,
)
ret.extend(stages)
return ret
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _reproduce(
self,
target,
single_item=False,
force=False,
dry=False,
interactive=False,
ignore_build_cache=False,
no_commit=False,
downstream=False,
):
import networkx as nx
from dvc.stage import Stage
stage = Stage.load(self, target)
G = self.graph()[1]
stages = nx.get_node_attributes(G, "stage")
node = relpath(stage.path, self.root_dir)
if single_item:
ret = _reproduce_stage(stages, node, force, dry, interactive, no_commit)
else:
ret = _reproduce_stages(
G,
stages,
node,
force,
dry,
interactive,
ignore_build_cache,
no_commit,
downstream,
)
return ret
|
def _reproduce(
self,
target,
single_item=False,
force=False,
dry=False,
interactive=False,
ignore_build_cache=False,
no_commit=False,
downstream=False,
):
import networkx as nx
from dvc.stage import Stage
stage = Stage.load(self, target)
G = self.graph()[1]
stages = nx.get_node_attributes(G, "stage")
node = os.path.relpath(stage.path, self.root_dir)
if single_item:
ret = _reproduce_stage(stages, node, force, dry, interactive, no_commit)
else:
ret = _reproduce_stages(
G,
stages,
node,
force,
dry,
interactive,
ignore_build_cache,
no_commit,
downstream,
)
return ret
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _get_gitignore(self, path, ignore_file_dir=None):
if not ignore_file_dir:
ignore_file_dir = os.path.dirname(os.path.realpath(path))
assert os.path.isabs(path)
assert os.path.isabs(ignore_file_dir)
if not path.startswith(ignore_file_dir):
msg = (
"{} file has to be located in one of '{}' subdirectories, not outside '{}'"
)
raise FileNotInTargetSubdirError(
msg.format(self.GITIGNORE, path, ignore_file_dir)
)
entry = relpath(path, ignore_file_dir).replace(os.sep, "/")
# NOTE: using '/' prefix to make path unambiguous
if len(entry) > 0 and entry[0] != "/":
entry = "/" + entry
gitignore = os.path.join(ignore_file_dir, self.GITIGNORE)
if not gitignore.startswith(os.path.realpath(self.root_dir)):
raise FileNotInRepoError(path)
return entry, gitignore
|
def _get_gitignore(self, path, ignore_file_dir=None):
if not ignore_file_dir:
ignore_file_dir = os.path.dirname(os.path.realpath(path))
assert os.path.isabs(path)
assert os.path.isabs(ignore_file_dir)
if not path.startswith(ignore_file_dir):
msg = (
"{} file has to be located in one of '{}' subdirectories, not outside '{}'"
)
raise FileNotInTargetSubdirError(
msg.format(self.GITIGNORE, path, ignore_file_dir)
)
entry = os.path.relpath(path, ignore_file_dir).replace(os.sep, "/")
# NOTE: using '/' prefix to make path unambiguous
if len(entry) > 0 and entry[0] != "/":
entry = "/" + entry
gitignore = os.path.join(ignore_file_dir, self.GITIGNORE)
if not gitignore.startswith(os.path.realpath(self.root_dir)):
raise FileNotInRepoError(path)
return entry, gitignore
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def ignore(self, path, in_curr_dir=False):
base_dir = os.path.realpath(os.curdir) if in_curr_dir else os.path.dirname(path)
entry, gitignore = self._get_gitignore(path, base_dir)
if self._ignored(entry, gitignore):
return
msg = "Adding '{}' to '{}'.".format(relpath(path), relpath(gitignore))
logger.info(msg)
self._add_entry_to_gitignore(entry, gitignore)
self.track_file(relpath(gitignore))
self.ignored_paths.append(path)
|
def ignore(self, path, in_curr_dir=False):
base_dir = os.path.realpath(os.curdir) if in_curr_dir else os.path.dirname(path)
entry, gitignore = self._get_gitignore(path, base_dir)
if self._ignored(entry, gitignore):
return
msg = "Adding '{}' to '{}'.".format(
os.path.relpath(path), os.path.relpath(gitignore)
)
logger.info(msg)
self._add_entry_to_gitignore(entry, gitignore)
self.track_file(os.path.relpath(gitignore))
self.ignored_paths.append(path)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def ignore_remove(self, path):
entry, gitignore = self._get_gitignore(path)
if not os.path.exists(gitignore):
return
with open(gitignore, "r") as fobj:
lines = fobj.readlines()
filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))
with open(gitignore, "w") as fobj:
fobj.writelines(filtered)
self.track_file(relpath(gitignore))
|
def ignore_remove(self, path):
entry, gitignore = self._get_gitignore(path)
if not os.path.exists(gitignore):
return
with open(gitignore, "r") as fobj:
lines = fobj.readlines()
filtered = list(filter(lambda x: x.strip() != entry.strip(), lines))
with open(gitignore, "w") as fobj:
fobj.writelines(filtered)
self.track_file(os.path.relpath(gitignore))
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def is_tracked(self, path):
# it is equivalent to `bool(self.git.git.ls_files(path))` by
# functionality, but ls_files fails on unicode filenames
path = relpath(path, self.root_dir)
return path in [i[0] for i in self.git.index.entries]
|
def is_tracked(self, path):
# it is equivalent to `bool(self.git.git.ls_files(path))` by
# functionality, but ls_files fails on unicode filenames
path = os.path.relpath(path, self.root_dir)
return path in [i[0] for i in self.git.index.entries]
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def open(self, path, binary=False):
relative_path = relpath(path, self.git.working_dir)
obj = self.git_object_by_path(path)
if obj is None:
msg = "No such file in branch '{}'".format(self.rev)
raise IOError(errno.ENOENT, msg, relative_path)
if obj.mode == GIT_MODE_DIR:
raise IOError(errno.EISDIR, "Is a directory", relative_path)
# GitPython's obj.data_stream is a fragile thing, it is better to
# read it immediately, also it needs to be to decoded if we follow
# the `open()` behavior (since data_stream.read() returns bytes,
# and `open` with default "r" mode returns str)
data = obj.data_stream.read()
if binary:
return BytesIO(data)
return StringIO(data.decode("utf-8"))
|
def open(self, path, binary=False):
relpath = os.path.relpath(path, self.git.working_dir)
obj = self.git_object_by_path(path)
if obj is None:
msg = "No such file in branch '{}'".format(self.rev)
raise IOError(errno.ENOENT, msg, relpath)
if obj.mode == GIT_MODE_DIR:
raise IOError(errno.EISDIR, "Is a directory", relpath)
# GitPython's obj.data_stream is a fragile thing, it is better to
# read it immediately, also it needs to be to decoded if we follow
# the `open()` behavior (since data_stream.read() returns bytes,
# and `open` with default "r" mode returns str)
data = obj.data_stream.read()
if binary:
return BytesIO(data)
return StringIO(data.decode("utf-8"))
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def git_object_by_path(self, path):
import git
path = relpath(os.path.realpath(path), self.git.working_dir)
assert path.split(os.sep, 1)[0] != ".."
self._try_fetch_from_remote()
try:
tree = self.git.tree(self.rev)
except git.exc.BadName as exc:
raise DvcException(
"revision '{}' not found in git '{}'".format(
self.rev, os.path.relpath(self.git.working_dir)
),
cause=exc,
)
if not path or path == ".":
return tree
for i in path.split(os.sep):
if not self._is_tree_and_contains(tree, i):
# there is no tree for specified path
return None
tree = tree[i]
return tree
|
def git_object_by_path(self, path):
import git
path = os.path.relpath(os.path.realpath(path), self.git.working_dir)
assert path.split(os.sep, 1)[0] != ".."
self._try_fetch_from_remote()
try:
tree = self.git.tree(self.rev)
except git.exc.BadName as exc:
raise DvcException(
"revision '{}' not found in git '{}'".format(
self.rev, os.path.relpath(self.git.working_dir)
),
cause=exc,
)
if not path or path == ".":
return tree
for i in path.split(os.sep):
if not self._is_tree_and_contains(tree, i):
# there is no tree for specified path
return None
tree = tree[i]
return tree
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def relpath(self):
return relpath(self.path)
|
def relpath(self):
return os.path.relpath(self.path)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def _check_dvc_filename(fname):
if not Stage.is_valid_filename(fname):
raise StageFileBadNameError(
"bad stage filename '{}'. Stage files should be named"
" 'Dvcfile' or have a '.dvc' suffix (e.g. '{}.dvc').".format(
relpath(fname), os.path.basename(fname)
)
)
|
def _check_dvc_filename(fname):
if not Stage.is_valid_filename(fname):
raise StageFileBadNameError(
"bad stage filename '{}'. Stage files should be named"
" 'Dvcfile' or have a '.dvc' suffix (e.g. '{}.dvc').".format(
os.path.relpath(fname), os.path.basename(fname)
)
)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def load(repo, fname):
fname, tag = Stage._get_path_tag(fname)
# it raises the proper exceptions by priority:
# 1. when the file doesn't exists
# 2. filename is not a DVC-file
# 3. path doesn't represent a regular file
Stage._check_file_exists(repo, fname)
Stage._check_dvc_filename(fname)
Stage._check_isfile(repo, fname)
with repo.tree.open(fname) as fd:
d = load_stage_fd(fd, fname)
# Making a deepcopy since the original structure
# looses keys in deps and outs load
state = copy.deepcopy(d)
Stage.validate(d, fname=relpath(fname))
path = os.path.abspath(fname)
stage = Stage(
repo=repo,
path=path,
wdir=os.path.abspath(
os.path.join(os.path.dirname(path), d.get(Stage.PARAM_WDIR, "."))
),
cmd=d.get(Stage.PARAM_CMD),
md5=d.get(Stage.PARAM_MD5),
locked=d.get(Stage.PARAM_LOCKED, False),
tag=tag,
state=state,
)
stage.deps = dependency.loadd_from(stage, d.get(Stage.PARAM_DEPS, []))
stage.outs = output.loadd_from(stage, d.get(Stage.PARAM_OUTS, []))
return stage
|
def load(repo, fname):
fname, tag = Stage._get_path_tag(fname)
# it raises the proper exceptions by priority:
# 1. when the file doesn't exists
# 2. filename is not a DVC-file
# 3. path doesn't represent a regular file
Stage._check_file_exists(repo, fname)
Stage._check_dvc_filename(fname)
Stage._check_isfile(repo, fname)
with repo.tree.open(fname) as fd:
d = load_stage_fd(fd, fname)
# Making a deepcopy since the original structure
# looses keys in deps and outs load
state = copy.deepcopy(d)
Stage.validate(d, fname=os.path.relpath(fname))
path = os.path.abspath(fname)
stage = Stage(
repo=repo,
path=path,
wdir=os.path.abspath(
os.path.join(os.path.dirname(path), d.get(Stage.PARAM_WDIR, "."))
),
cmd=d.get(Stage.PARAM_CMD),
md5=d.get(Stage.PARAM_MD5),
locked=d.get(Stage.PARAM_LOCKED, False),
tag=tag,
state=state,
)
stage.deps = dependency.loadd_from(stage, d.get(Stage.PARAM_DEPS, []))
stage.outs = output.loadd_from(stage, d.get(Stage.PARAM_OUTS, []))
return stage
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def dumpd(self):
rel_wdir = relpath(self.wdir, os.path.dirname(self.path))
return {
key: value
for key, value in {
Stage.PARAM_MD5: self.md5,
Stage.PARAM_CMD: self.cmd,
Stage.PARAM_WDIR: pathlib.PurePath(rel_wdir).as_posix(),
Stage.PARAM_LOCKED: self.locked,
Stage.PARAM_DEPS: [d.dumpd() for d in self.deps],
Stage.PARAM_OUTS: [o.dumpd() for o in self.outs],
Stage.PARAM_META: self._state.get("meta"),
}.items()
if value
}
|
def dumpd(self):
rel_wdir = os.path.relpath(self.wdir, os.path.dirname(self.path))
return {
key: value
for key, value in {
Stage.PARAM_MD5: self.md5,
Stage.PARAM_CMD: self.cmd,
Stage.PARAM_WDIR: pathlib.PurePath(rel_wdir).as_posix(),
Stage.PARAM_LOCKED: self.locked,
Stage.PARAM_DEPS: [d.dumpd() for d in self.deps],
Stage.PARAM_OUTS: [o.dumpd() for o in self.outs],
Stage.PARAM_META: self._state.get("meta"),
}.items()
if value
}
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def dump(self):
fname = self.path
self._check_dvc_filename(fname)
logger.info("Saving information to '{file}'.".format(file=relpath(fname)))
d = self.dumpd()
apply_diff(d, self._state)
dump_stage_file(fname, self._state)
self.repo.scm.track_file(relpath(fname))
|
def dump(self):
fname = self.path
self._check_dvc_filename(fname)
logger.info("Saving information to '{file}'.".format(file=os.path.relpath(fname)))
d = self.dumpd()
apply_diff(d, self._state)
dump_stage_file(fname, self._state)
self.repo.scm.track_file(os.path.relpath(fname))
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def save_link(self, path_info):
"""Adds the specified path to the list of links created by dvc. This
list is later used on `dvc checkout` to cleanup old links.
Args:
path_info (dict): path info to add to the list of links.
"""
assert path_info.scheme == "local"
path = fspath_py35(path_info)
if not os.path.exists(path):
return
mtime, _ = get_mtime_and_size(path)
inode = get_inode(path)
relative_path = relpath(path, self.root_dir)
cmd = 'REPLACE INTO {}(path, inode, mtime) VALUES ("{}", {}, "{}")'.format(
self.LINK_STATE_TABLE,
relative_path,
self._to_sqlite(inode),
mtime,
)
self._execute(cmd)
|
def save_link(self, path_info):
"""Adds the specified path to the list of links created by dvc. This
list is later used on `dvc checkout` to cleanup old links.
Args:
path_info (dict): path info to add to the list of links.
"""
assert path_info.scheme == "local"
path = fspath_py35(path_info)
if not os.path.exists(path):
return
mtime, _ = get_mtime_and_size(path)
inode = get_inode(path)
relpath = os.path.relpath(path, self.root_dir)
cmd = 'REPLACE INTO {}(path, inode, mtime) VALUES ("{}", {}, "{}")'.format(
self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime
)
self._execute(cmd)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def file_md5(fname):
"""get the (md5 hexdigest, md5 digest) of a file"""
from dvc.progress import progress
from dvc.istextfile import istextfile
if os.path.exists(fname):
hash_md5 = hashlib.md5()
binary = not istextfile(fname)
size = os.path.getsize(fname)
bar = False
if size >= LARGE_FILE_SIZE:
bar = True
msg = "Computing md5 for a large file {}. This is only done once."
logger.info(msg.format(relpath(fname)))
name = relpath(fname)
total = 0
with open(fname, "rb") as fobj:
while True:
data = fobj.read(LOCAL_CHUNK_SIZE)
if not data:
break
if bar:
total += len(data)
progress.update_target(name, total, size)
if binary:
chunk = data
else:
chunk = dos2unix(data)
hash_md5.update(chunk)
if bar:
progress.finish_target(name)
return (hash_md5.hexdigest(), hash_md5.digest())
else:
return (None, None)
|
def file_md5(fname):
"""get the (md5 hexdigest, md5 digest) of a file"""
from dvc.progress import progress
from dvc.istextfile import istextfile
if os.path.exists(fname):
hash_md5 = hashlib.md5()
binary = not istextfile(fname)
size = os.path.getsize(fname)
bar = False
if size >= LARGE_FILE_SIZE:
bar = True
msg = "Computing md5 for a large file {}. This is only done once."
logger.info(msg.format(os.path.relpath(fname)))
name = os.path.relpath(fname)
total = 0
with open(fname, "rb") as fobj:
while True:
data = fobj.read(LOCAL_CHUNK_SIZE)
if not data:
break
if bar:
total += len(data)
progress.update_target(name, total, size)
if binary:
chunk = data
else:
chunk = dos2unix(data)
hash_md5.update(chunk)
if bar:
progress.finish_target(name)
return (hash_md5.hexdigest(), hash_md5.digest())
else:
return (None, None)
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def remove(path):
logger.debug("Removing '{}'".format(relpath(path)))
try:
if os.path.isdir(path):
shutil.rmtree(path, onerror=_chmod)
else:
_chmod(os.unlink, path, None)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
|
def remove(path):
logger.debug("Removing '{}'".format(os.path.relpath(path)))
try:
if os.path.isdir(path):
shutil.rmtree(path, onerror=_chmod)
else:
_chmod(os.unlink, path, None)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
|
https://github.com/iterative/dvc/issues/2119
|
ERROR: failed to pull data from the cloud - path is on mount 'F:', start on mount 'C:'
Traceback (most recent call last):
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\command\data_sync.py", line 46, in
do_run recursive=self.args.recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\pull.py", line 24, in pull
recursive=recursive,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\repo\fetch.py", line 27, in fetch
used, jobs, remote=remote, show_checksums=show_checksums
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\data_cloud.py", line 144, in pull
show_checksums=show_checksums,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 493, in pull
download=True,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 459, in _process
download=download,
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 358, in status
local_exists = self.cache_exists(md5s)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in cache_exists
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 232, in <lambda>
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 518, in changed_cache_file
actual = self.get_checksum(cache_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\base.py", line 275, in get_checksum
checksum = self.get_file_checksum(path_info)
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\remote\local\__init__.py", line 208, in get_file_checksum
return file_md5(path_info.path)[0]
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\site-packages\dvc\utils\__init__.py", line 48, in file_md5 logger.info(msg.format(os.path.relpath(fname)))
File "c:\users\srg\appdata\local\continuum\anaconda3\envs\tf-keras\lib\ntpath.py", line 584, in relpath
path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:'
|
ValueError
|
def __init__(self, args):
super(CmdCacheDir, self).__init__(args)
self.cache_config = CacheConfig(self.config)
|
def __init__(self, args):
super(CmdCacheDir, self).__init__(args)
self.config = CacheConfig(self.config)
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def run(self):
self.cache_config.set_dir(self.args.value, level=self.args.level)
return 0
|
def run(self):
self.config.set_dir(self.args.value, level=self.args.level)
return 0
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def __init__(self, args):
super(CmdRemoteConfig, self).__init__(args)
self.remote_config = RemoteConfig(self.config)
|
def __init__(self, args):
super(CmdRemoteConfig, self).__init__(args)
self.config = RemoteConfig(self.config)
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def run(self):
self.remote_config.add(
self.args.name,
self.args.url,
force=self.args.force,
default=self.args.default,
level=self.args.level,
)
return 0
|
def run(self):
self.config.add(
self.args.name,
self.args.url,
force=self.args.force,
default=self.args.default,
level=self.args.level,
)
return 0
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def run(self):
self.remote_config.remove(self.args.name, level=self.args.level)
return 0
|
def run(self):
self.config.remove(self.args.name, level=self.args.level)
return 0
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def run(self):
self.remote_config.modify(
self.args.name,
self.args.option,
self.args.value,
level=self.args.level,
)
return 0
|
def run(self):
self.config.modify(
self.args.name,
self.args.option,
self.args.value,
level=self.args.level,
)
return 0
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def run(self):
self.remote_config.set_default(
self.args.name, unset=self.args.unset, level=self.args.level
)
return 0
|
def run(self):
self.config.set_default(
self.args.name, unset=self.args.unset, level=self.args.level
)
return 0
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def run(self):
for name, url in self.remote_config.list(level=self.args.level).items():
logger.info("{}\t{}".format(name, url))
return 0
|
def run(self):
for name, url in self.config.list(level=self.args.level).items():
logger.info("{}\t{}".format(name, url))
return 0
|
https://github.com/iterative/dvc/issues/2110
|
Traceback (most recent call last):
File "/home/prd/development/tools/venvs/dvc3.6/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/prd/development/projects/dvc/dvc/main.py", line 57, in main
Analytics().send_cmd(cmd, args, ret)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 256, in send_cmd
if not Analytics._is_enabled(cmd):
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 240, in _is_enabled
enabled = Analytics._is_enabled_config(config)
File "/home/prd/development/projects/dvc/dvc/analytics.py", line 212, in _is_enabled_config
core = config.config.get(Config.SECTION_CORE, {})
File "/home/prd/development/projects/dvc/dvc/config.py", line 563, in get
"option '{}.{}' doesn't exist".format(section, opt)
dvc.config.ConfigError: config file error: option 'core.{}' doesn't exist
|
dvc.config.ConfigError
|
def __init__(self, stage, path, info=None, remote=None):
super(DependencyHTTP, self).__init__(stage, path, info=info, remote=remote)
if path.startswith("remote"):
path = urljoin(self.remote.cache_dir, urlparse(path).path)
self.path_info = PathInfo(self.scheme, url=self.url, path=path)
|
def __init__(self, stage, path, info=None, remote=None):
super(DependencyHTTP, self).__init__(stage, path, info=info, remote=remote)
if path.startswith("remote"):
path = urljoin(self.remote.cache_dir, urlparse(path).path)
self.path_info = HTTPPathInfo(url=self.url, path=path)
|
https://github.com/iterative/dvc/issues/1988
|
(base) ➜ example git:(master) ✗ dvc import -v https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Skipping copying for '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz', since it is not a symlink or a hardlink.
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652524846561024", "")
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Removing output 'mibig_json_1.4.tar.gz' of 'mibig_json_1.4.tar.gz.dvc'.
DEBUG: Removing 'mibig_json_1.4.tar.gz'
Importing 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' -> '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
DEBUG: Computed stage 'mibig_json_1.4.tar.gz.dvc' md5: '9d1b61169e8c0795fcf31de916db584b'
DEBUG: Downloading 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' to '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
[##############################] 100% mibig_json_1.4.tar.gz
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652527775110912", "")
ERROR: unexpected error
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/main.py", line 38, in main
ret = cmd.run_cmd()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/base.py", line 60, in run_cmd
return self.run()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/imp.py", line 23, in run
self.args.url, out, self.args.resume, fname=self.args.file
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/imp.py", line 19, in imp
stage.run(resume=resume)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 834, in run
self.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 689, in save
dep.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/output/base.py", line 211, in save
self.info = self.remote.save_info(self.path_info)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/remote/base.py", line 283, in save_info
assert path_info.scheme == self.scheme
AssertionError
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
|
AssertionError
|
def __init__(self, repo, config):
super(RemoteHTTP, self).__init__(repo, config)
self.cache_dir = config.get(Config.SECTION_REMOTE_URL)
self.url = self.cache_dir
self.path_info = PathInfo(self.scheme)
|
def __init__(self, repo, config):
super(RemoteHTTP, self).__init__(repo, config)
self.cache_dir = config.get(Config.SECTION_REMOTE_URL)
self.url = self.cache_dir
self.path_info = HTTPPathInfo()
|
https://github.com/iterative/dvc/issues/1988
|
(base) ➜ example git:(master) ✗ dvc import -v https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Skipping copying for '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz', since it is not a symlink or a hardlink.
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652524846561024", "")
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Removing output 'mibig_json_1.4.tar.gz' of 'mibig_json_1.4.tar.gz.dvc'.
DEBUG: Removing 'mibig_json_1.4.tar.gz'
Importing 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' -> '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
DEBUG: Computed stage 'mibig_json_1.4.tar.gz.dvc' md5: '9d1b61169e8c0795fcf31de916db584b'
DEBUG: Downloading 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' to '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
[##############################] 100% mibig_json_1.4.tar.gz
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652527775110912", "")
ERROR: unexpected error
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/main.py", line 38, in main
ret = cmd.run_cmd()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/base.py", line 60, in run_cmd
return self.run()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/imp.py", line 23, in run
self.args.url, out, self.args.resume, fname=self.args.file
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/imp.py", line 19, in imp
stage.run(resume=resume)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 834, in run
self.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 689, in save
dep.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/output/base.py", line 211, in save
self.info = self.remote.save_info(self.path_info)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/remote/base.py", line 283, in save_info
assert path_info.scheme == self.scheme
AssertionError
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
|
AssertionError
|
def download(
self,
from_infos,
to_infos,
no_progress_bar=False,
names=None,
resume=False,
):
names = self._verify_path_args(to_infos, from_infos, names)
for to_info, from_info, name in zip(to_infos, from_infos, names):
if from_info.scheme != self.scheme:
raise NotImplementedError
if to_info.scheme != "local":
raise NotImplementedError
msg = "Downloading '{}' to '{}'".format(from_info.path, to_info.path)
logger.debug(msg)
if not name:
name = os.path.basename(to_info.path)
makedirs(os.path.dirname(to_info.path), exist_ok=True)
total = self._content_length(from_info.path)
if no_progress_bar or not total:
cb = None
else:
cb = ProgressBarCallback(name, total)
try:
self._download_to(from_info.path, to_info.path, callback=cb, resume=resume)
except Exception:
msg = "failed to download '{}'".format(from_info.path)
logger.exception(msg)
continue
if not no_progress_bar:
progress.finish_target(name)
|
def download(
self,
from_infos,
to_infos,
no_progress_bar=False,
names=None,
resume=False,
):
names = self._verify_path_args(to_infos, from_infos, names)
for to_info, from_info, name in zip(to_infos, from_infos, names):
if from_info.scheme not in ["http", "https"]:
raise NotImplementedError
if to_info.scheme != "local":
raise NotImplementedError
msg = "Downloading '{}' to '{}'".format(from_info.path, to_info.path)
logger.debug(msg)
if not name:
name = os.path.basename(to_info.path)
makedirs(os.path.dirname(to_info.path), exist_ok=True)
total = self._content_length(from_info.path)
if no_progress_bar or not total:
cb = None
else:
cb = ProgressBarCallback(name, total)
try:
self._download_to(from_info.path, to_info.path, callback=cb, resume=resume)
except Exception:
msg = "failed to download '{}'".format(from_info.path)
logger.exception(msg)
continue
if not no_progress_bar:
progress.finish_target(name)
|
https://github.com/iterative/dvc/issues/1988
|
(base) ➜ example git:(master) ✗ dvc import -v https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Skipping copying for '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz', since it is not a symlink or a hardlink.
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652524846561024", "")
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Removing output 'mibig_json_1.4.tar.gz' of 'mibig_json_1.4.tar.gz.dvc'.
DEBUG: Removing 'mibig_json_1.4.tar.gz'
Importing 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' -> '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
DEBUG: Computed stage 'mibig_json_1.4.tar.gz.dvc' md5: '9d1b61169e8c0795fcf31de916db584b'
DEBUG: Downloading 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' to '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
[##############################] 100% mibig_json_1.4.tar.gz
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652527775110912", "")
ERROR: unexpected error
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/main.py", line 38, in main
ret = cmd.run_cmd()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/base.py", line 60, in run_cmd
return self.run()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/imp.py", line 23, in run
self.args.url, out, self.args.resume, fname=self.args.file
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/imp.py", line 19, in imp
stage.run(resume=resume)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 834, in run
self.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 689, in save
dep.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/output/base.py", line 211, in save
self.info = self.remote.save_info(self.path_info)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/remote/base.py", line 283, in save_info
assert path_info.scheme == self.scheme
AssertionError
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
|
AssertionError
|
def exists(self, path_info):
assert not isinstance(path_info, list)
assert path_info.scheme == self.scheme
return bool(self._request("HEAD", path_info.path))
|
def exists(self, path_info):
assert not isinstance(path_info, list)
assert path_info.scheme in ["http", "https"]
return bool(self._request("HEAD", path_info.path))
|
https://github.com/iterative/dvc/issues/1988
|
(base) ➜ example git:(master) ✗ dvc import -v https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Skipping copying for '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz', since it is not a symlink or a hardlink.
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652524846561024", "")
DEBUG: PRAGMA user_version;
DEBUG: fetched: [(3,)]
DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
DEBUG: PRAGMA user_version = 3;
DEBUG: Removing output 'mibig_json_1.4.tar.gz' of 'mibig_json_1.4.tar.gz.dvc'.
DEBUG: Removing 'mibig_json_1.4.tar.gz'
Importing 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' -> '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
DEBUG: Computed stage 'mibig_json_1.4.tar.gz.dvc' md5: '9d1b61169e8c0795fcf31de916db584b'
DEBUG: Downloading 'https://mibig.secondarymetabolites.org/mibig_json_1.4.tar.gz' to '/Users/prihodad/Documents/projects/dvc2-example/example/mibig_json_1.4.tar.gz'
[##############################] 100% mibig_json_1.4.tar.gz
DEBUG: SELECT count from state_info WHERE rowid=1
DEBUG: fetched: [(6,)]
DEBUG: UPDATE state_info SET count = 6 WHERE rowid = 1
DEBUG: Path /Users/prihodad/Documents/projects/dvc2-example/.dvc/cache inode 259931721
DEBUG: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (259931721, "1408117", "1557652462000000000", "1557652527775110912", "")
ERROR: unexpected error
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/main.py", line 38, in main
ret = cmd.run_cmd()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/base.py", line 60, in run_cmd
return self.run()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/command/imp.py", line 23, in run
self.args.url, out, self.args.resume, fname=self.args.file
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/scm_context.py", line 4, in run
result = method(repo, *args, **kw)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/repo/imp.py", line 19, in imp
stage.run(resume=resume)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 834, in run
self.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/stage.py", line 689, in save
dep.save()
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/output/base.py", line 211, in save
self.info = self.remote.save_info(self.path_info)
File "/Users/prihodad/anaconda3/lib/python3.6/site-packages/dvc/remote/base.py", line 283, in save_info
assert path_info.scheme == self.scheme
AssertionError
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
|
AssertionError
|
def _walk(
self,
tree,
topdown=True,
ignore_file_handler=None,
dvc_ignore_filter=None,
):
dirs, nondirs = [], []
for i in tree:
if i.mode == GIT_MODE_DIR:
dirs.append(i.name)
else:
nondirs.append(i.name)
if topdown:
if not dvc_ignore_filter:
dvc_ignore_filter = DvcIgnoreFilter(
tree.abspath, ignore_file_handler=ignore_file_handler
)
dirs, nondirs = dvc_ignore_filter(tree.path, dirs, nondirs)
yield os.path.normpath(tree.abspath), dirs, nondirs
for i in dirs:
for x in self._walk(
tree[i],
topdown=True,
ignore_file_handler=ignore_file_handler,
dvc_ignore_filter=dvc_ignore_filter,
):
yield x
if not topdown:
yield os.path.normpath(tree.abspath), dirs, nondirs
|
def _walk(
self,
tree,
topdown=True,
ignore_file_handler=None,
dvc_ignore_filter=None,
):
dirs, nondirs = [], []
for i in tree:
if i.mode == GIT_MODE_DIR:
dirs.append(i.name)
else:
nondirs.append(i.name)
if topdown:
if not dvc_ignore_filter:
dvc_ignore_filter = DvcIgnoreFilter(
tree.abspath, ignore_file_handler=ignore_file_handler
)
dirs, nondirs = dvc_ignore_filter(tree.path, dirs, nondirs)
yield os.path.normpath(tree.path), dirs, nondirs
for i in dirs:
for x in self._walk(
tree[i],
topdown=True,
ignore_file_handler=ignore_file_handler,
dvc_ignore_filter=dvc_ignore_filter,
):
yield x
if not topdown:
yield os.path.normpath(tree.path), dirs, nondirs
|
https://github.com/iterative/dvc/issues/1891
|
dvc metrics show -a -v
ERROR: failed to show metrics - 'data/data-2010-2016.h5.dvc' does not exist.
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/command/metrics.py", line 47, in run
recursive=self.args.recursive,
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/metrics/__init__.py", line 18, in show
return show(self.repo, *args, **kwargs)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/metrics/show.py", line 262, in show
entries = _collect_metrics(self, path, recursive, typ, xpath, branch)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/metrics/show.py", line 168, in _collect_metrics
outs = [out for stage in self.stages() for out in stage.outs]
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/__init__.py", line 398, in stages
stage = Stage.load(self, path)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/stage.py", line 564, in load
Stage._check_file_exists(repo, fname)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/stage.py", line 541, in _check_file_exists
raise StageFileDoesNotExistError(fname)
dvc.stage.StageFileDoesNotExistError: 'data/data-2010-2016.h5.dvc' does not exist.
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
DEBUG: Analytics is enabled.
DEBUG: Trying to spawn '['/Users/frag/miniconda3/envs/generic-dev/bin/python', '-m', 'dvc', 'daemon', '-q', 'analytics', '/var/folders/q3/7kp92vs15tjc1gc7413rw5mm0000gn/T/tmp730_03qg']'
DEBUG: Spawned '['/Users/frag/miniconda3/envs/generic-dev/bin/python', '-m', 'dvc', 'daemon', '-q', 'analytics', '/var/folders/q3/7kp92vs15tjc1gc7413rw5mm0000gn/T/tmp730_03qg']'
|
dvc.stage.StageFileDoesNotExistError
|
def walk(self, top, topdown=True, ignore_file_handler=None):
"""Directory tree generator.
See `os.walk` for the docs. Differences:
- no support for symlinks
- it could raise exceptions, there is no onerror argument
"""
def onerror(e):
raise e
for root, dirs, files in dvc_walk(
os.path.abspath(top),
topdown=topdown,
onerror=onerror,
ignore_file_handler=ignore_file_handler,
):
yield os.path.normpath(root), dirs, files
|
def walk(self, top, topdown=True, ignore_file_handler=None):
"""Directory tree generator.
See `os.walk` for the docs. Differences:
- no support for symlinks
- it could raise exceptions, there is no onerror argument
"""
def onerror(e):
raise e
for root, dirs, files in dvc_walk(
top,
topdown=topdown,
onerror=onerror,
ignore_file_handler=ignore_file_handler,
):
yield os.path.normpath(root), dirs, files
|
https://github.com/iterative/dvc/issues/1891
|
dvc metrics show -a -v
ERROR: failed to show metrics - 'data/data-2010-2016.h5.dvc' does not exist.
------------------------------------------------------------
Traceback (most recent call last):
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/command/metrics.py", line 47, in run
recursive=self.args.recursive,
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/metrics/__init__.py", line 18, in show
return show(self.repo, *args, **kwargs)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/metrics/show.py", line 262, in show
entries = _collect_metrics(self, path, recursive, typ, xpath, branch)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/metrics/show.py", line 168, in _collect_metrics
outs = [out for stage in self.stages() for out in stage.outs]
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/repo/__init__.py", line 398, in stages
stage = Stage.load(self, path)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/stage.py", line 564, in load
Stage._check_file_exists(repo, fname)
File "/Users/frag/miniconda3/envs/generic-dev/lib/python3.6/site-packages/dvc/stage.py", line 541, in _check_file_exists
raise StageFileDoesNotExistError(fname)
dvc.stage.StageFileDoesNotExistError: 'data/data-2010-2016.h5.dvc' does not exist.
------------------------------------------------------------
Having any troubles?. Hit us up at https://dvc.org/support, we are always happy to help!
DEBUG: Analytics is enabled.
DEBUG: Trying to spawn '['/Users/frag/miniconda3/envs/generic-dev/bin/python', '-m', 'dvc', 'daemon', '-q', 'analytics', '/var/folders/q3/7kp92vs15tjc1gc7413rw5mm0000gn/T/tmp730_03qg']'
DEBUG: Spawned '['/Users/frag/miniconda3/envs/generic-dev/bin/python', '-m', 'dvc', 'daemon', '-q', 'analytics', '/var/folders/q3/7kp92vs15tjc1gc7413rw5mm0000gn/T/tmp730_03qg']'
|
dvc.stage.StageFileDoesNotExistError
|
def __init__(self, root_dir=None):
from dvc.config import Config
from dvc.state import State
from dvc.lock import Lock
from dvc.scm import SCM
from dvc.cache import Cache
from dvc.data_cloud import DataCloud
from dvc.updater import Updater
from dvc.repo.metrics import Metrics
from dvc.scm.tree import WorkingTree
root_dir = self.find_root(root_dir)
self.root_dir = os.path.abspath(os.path.realpath(root_dir))
self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)
self.config = Config(self.dvc_dir)
self.tree = WorkingTree()
self.scm = SCM(self.root_dir, repo=self)
self.lock = Lock(self.dvc_dir)
# NOTE: storing state and link_state in the repository itself to avoid
# any possible state corruption in 'shared cache dir' scenario.
self.state = State(self, self.config.config)
core = self.config.config[Config.SECTION_CORE]
logger.set_level(core.get(Config.SECTION_CORE_LOGLEVEL))
self.cache = Cache(self)
self.cloud = DataCloud(self, config=self.config.config)
self.updater = Updater(self.dvc_dir)
self.metrics = Metrics(self)
self._ignore()
self.updater.check()
|
def __init__(self, root_dir=None):
from dvc.config import Config
from dvc.state import State
from dvc.lock import Lock
from dvc.scm import SCM
from dvc.cache import Cache
from dvc.data_cloud import DataCloud
from dvc.updater import Updater
from dvc.repo.metrics import Metrics
from dvc.repo.tree import WorkingTree
root_dir = self.find_root(root_dir)
self.root_dir = os.path.abspath(os.path.realpath(root_dir))
self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)
self.config = Config(self.dvc_dir)
self.tree = WorkingTree()
self.scm = SCM(self.root_dir, repo=self)
self.lock = Lock(self.dvc_dir)
# NOTE: storing state and link_state in the repository itself to avoid
# any possible state corruption in 'shared cache dir' scenario.
self.state = State(self, self.config.config)
core = self.config.config[Config.SECTION_CORE]
logger.set_level(core.get(Config.SECTION_CORE_LOGLEVEL))
self.cache = Cache(self)
self.cloud = DataCloud(self, config=self.config.config)
self.updater = Updater(self.dvc_dir)
self.metrics = Metrics(self)
self._ignore()
self.updater.check()
|
https://github.com/iterative/dvc/issues/1769
|
python -c "import dvc.scm"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "dvc/scm/__init__.py", line 6, in <module>
from dvc.scm.git import Git
File "dvc/scm/git/__init__.py", line 15, in <module>
from dvc.scm.git.tree import GitTree
File "dvc/scm/git/tree.py", line 6, in <module>
from dvc.repo.tree import BaseTree
File "dvc/repo/__init__.py", line 10, in <module>
class Repo(object):
File "dvc/repo/__init__.py", line 15, in Repo
from dvc.repo.add import add
File "dvc/repo/add.py", line 3, in <module>
from dvc.scm import scm_context
ImportError: cannot import name scm_context
|
ImportError
|
def brancher( # noqa: E302
self, branches=None, all_branches=False, tags=None, all_tags=False
):
"""Generator that iterates over specified revisions.
Args:
branches (list): a list of branches to iterate over.
all_branches (bool): iterate over all available branches.
tags (list): a list of tags to iterate over.
all_tags (bool): iterate over all available tags.
Yields:
str: the display name for the currently selected tree, it could be:
- a git revision identifier
- empty string it there is no branches to iterate over
- "Working Tree" if there are uncommited changes in the SCM repo
"""
if not any([branches, all_branches, tags, all_tags]):
yield ""
return
saved_tree = self.tree
revs = []
scm = self.scm
if self.scm.is_dirty():
from dvc.scm.tree import WorkingTree
self.tree = WorkingTree()
yield "Working Tree"
if all_branches:
branches = scm.list_branches()
if all_tags:
tags = scm.list_tags()
if branches is None:
revs.extend([scm.active_branch()])
else:
revs.extend(branches)
if tags is not None:
revs.extend(tags)
# NOTE: it might be a good idea to wrap this loop in try/finally block
# to don't leave the tree on some unexpected branch after the
# `brancher()`, but this could cause problems on exception handling
# code which might expect the tree on which exception was raised to
# stay in place. This behavior is a subject to change.
for rev in revs:
self.tree = scm.get_tree(rev)
yield rev
self.tree = saved_tree
|
def brancher( # noqa: E302
self, branches=None, all_branches=False, tags=None, all_tags=False
):
"""Generator that iterates over specified revisions.
Args:
branches (list): a list of branches to iterate over.
all_branches (bool): iterate over all available branches.
tags (list): a list of tags to iterate over.
all_tags (bool): iterate over all available tags.
Yields:
str: the display name for the currently selected tree, it could be:
- a git revision identifier
- empty string it there is no branches to iterate over
- "Working Tree" if there are uncommited changes in the SCM repo
"""
if not any([branches, all_branches, tags, all_tags]):
yield ""
return
saved_tree = self.tree
revs = []
scm = self.scm
if self.scm.is_dirty():
from dvc.repo.tree import WorkingTree
self.tree = WorkingTree()
yield "Working Tree"
if all_branches:
branches = scm.list_branches()
if all_tags:
tags = scm.list_tags()
if branches is None:
revs.extend([scm.active_branch()])
else:
revs.extend(branches)
if tags is not None:
revs.extend(tags)
# NOTE: it might be a good idea to wrap this loop in try/finally block
# to don't leave the tree on some unexpected branch after the
# `brancher()`, but this could cause problems on exception handling
# code which might expect the tree on which exception was raised to
# stay in place. This behavior is a subject to change.
for rev in revs:
self.tree = scm.get_tree(rev)
yield rev
self.tree = saved_tree
|
https://github.com/iterative/dvc/issues/1769
|
python -c "import dvc.scm"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "dvc/scm/__init__.py", line 6, in <module>
from dvc.scm.git import Git
File "dvc/scm/git/__init__.py", line 15, in <module>
from dvc.scm.git.tree import GitTree
File "dvc/scm/git/tree.py", line 6, in <module>
from dvc.repo.tree import BaseTree
File "dvc/repo/__init__.py", line 10, in <module>
class Repo(object):
File "dvc/repo/__init__.py", line 15, in Repo
from dvc.repo.add import add
File "dvc/repo/add.py", line 3, in <module>
from dvc.scm import scm_context
ImportError: cannot import name scm_context
|
ImportError
|
def get_state_record_for_inode(self, inode):
cmd = "SELECT mtime, size, md5, timestamp from {} WHERE inode={}"
cmd = cmd.format(self.STATE_TABLE, self._to_sqlite(inode))
self._execute(cmd)
results = self._fetchall()
if results:
# uniquness constrain on inode
assert len(results) == 1
return results[0]
return None
|
def get_state_record_for_inode(self, inode):
cmd = "SELECT mtime, size, md5, timestamp from {} WHERE inode={}"
cmd = cmd.format(self.STATE_TABLE, inode)
self._execute(cmd)
results = self._fetchall()
if results:
# uniquness constrain on inode
assert len(results) == 1
return results[0]
return None
|
https://github.com/iterative/dvc/issues/1707
|
dvc run -f test.dvc -d train "python -c 'print()'"
Debug: PRAGMA user_version;
Debug: fetched: [(3,)]
Debug: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
Debug: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
Debug: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
Debug: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
Debug: PRAGMA user_version = 3;
Debug: Path /home/greg/src/atp/train inode 12123207390858225349
Debug: SELECT mtime, size, md5, timestamp from state WHERE inode=12123207390858225349
Debug: fetched: []
Debug: Path /home/greg/src/atp/train/Dockerfile inode 10698461848281723475
Debug: SELECT mtime, size, md5, timestamp from state WHERE inode=10698461848281723475
Debug: fetched: []
Debug: INSERT INTO state(inode, mtime, size, md5, timestamp) VALUES (-1475089811426947668, "1552167285616000000", "318", "ad609def7b88b8145a8e74c6cb622597", "1552172098641832448")
Debug: SELECT count from state_info WHERE rowid=1
Debug: fetched: [(53,)]
Debug: UPDATE state_info SET count = 53 WHERE rowid = 1
Debug: Path /tmp/dvc_cache inode 519743
Debug: INSERT OR REPLACE INTO state(inode, size, mtime, timestamp, md5) VALUES (519743, "22099", "1552171956826519296", "1552172098646104832", "")
Error: unexpected error - UNIQUE constraint failed: state.inode
------------------------------------------------------------
Traceback (most recent call last):
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/main.py", line 32, in main
ret = cmd.run_cmd()
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/command/base.py", line 53, in run_cmd
return self.run()
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/command/run.py", line 45, in run
no_commit=self.args.no_commit,
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/repo/run.py", line 48, in run
remove_outs=remove_outs,
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/stage.py", line 462, in create
if not ignore_build_cache and stage.is_cached:
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/stage.py", line 366, in is_cached
dep.save()
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/output/local.py", line 99, in save
self.info = self.remote.save_info(self.path_info)
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/remote/local.py", line 451, in save_info
return {self.PARAM_CHECKSUM: self.state.update(path_info["path"])}
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 431, in update
return self._do_update(path, known_checksum)[0]
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 320, in _do_update
path, actual_inode, actual_mtime, actual_size, known_checksum
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 391, in _insert_new_state_record
md5, info = self._collect(path)
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 114, in _collect
return self.repo.cache.local.collect_dir_cache(path)
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/remote/local.py", line 226, in collect_dir_cache
md5 = self.state.update(path)
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 431, in update
return self._do_update(path, known_checksum)[0]
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 320, in _do_update
path, actual_inode, actual_mtime, actual_size, known_checksum
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 403, in _insert_new_state_record
current_timestamp(),
File "/home/greg/.virtualenv/dvc/lib/python3.6/site-packages/dvc/state.py", line 139, in _execute
return self.cursor.execute(cmd)
sqlite3.IntegrityError: UNIQUE constraint failed: state.inode
------------------------------------------------------------
|
sqlite3.IntegrityError
|
def _do_draw(self, screen): # pragma: no cover
from asciimatics.event import KeyboardEvent
offset_x = 0
offset_y = 0
smaxrow, smaxcol = screen.dimensions
assert smaxrow > 1
assert smaxcol > 1
smaxrow -= 1
smaxcol -= 1
if self.lines + 1 > smaxrow:
max_y = self.lines + 1 - smaxrow
else:
max_y = 0
if self.cols + 1 > smaxcol:
max_x = self.cols + 1 - smaxcol
else:
max_x = 0
while True:
for y in range(smaxrow + 1):
y_index = offset_y + y
line = []
for x in range(smaxcol + 1):
x_index = offset_x + x
if len(self.canvas) > y_index and len(self.canvas[y_index]) > x_index:
line.append(self.canvas[y_index][x_index])
else:
line.append(" ")
assert len(line) == (smaxcol + 1)
screen.print_at("".join(line), 0, y)
screen.refresh()
# NOTE: get_event() doesn't block by itself,
# so we have to do the blocking ourselves.
#
# NOTE: using formally private method while waiting for PR [1]
# to get merged. After that need to adjust asciimatics version
# requirements.
#
# [1] https://github.com/peterbrittain/asciimatics/pull/188
screen._wait_for_input(self.TIMEOUT)
event = screen.get_event()
if not isinstance(event, KeyboardEvent):
continue
k = event.key_code
if k == screen.KEY_DOWN or k == ord("s"):
offset_y += 1
elif k == screen.KEY_PAGE_DOWN or k == ord("S"):
offset_y += smaxrow
elif k == screen.KEY_UP or k == ord("w"):
offset_y -= 1
elif k == screen.KEY_PAGE_UP or k == ord("W"):
offset_y -= smaxrow
elif k == screen.KEY_RIGHT or k == ord("d"):
offset_x += 1
elif k == ord("D"):
offset_x += smaxcol
elif k == screen.KEY_LEFT or k == ord("a"):
offset_x -= 1
elif k == ord("A"):
offset_x -= smaxcol
elif k == ord("q") or k == ord("Q"):
break
if offset_y > max_y:
offset_y = max_y
elif offset_y < 0:
offset_y = 0
if offset_x > max_x:
offset_x = max_x
elif offset_x < 0:
offset_x = 0
|
def _do_draw(self, screen): # pragma: no cover
from asciimatics.event import KeyboardEvent
offset_x = 0
offset_y = 0
smaxrow, smaxcol = screen.dimensions
assert smaxrow > 1
assert smaxcol > 1
smaxrow -= 1
smaxcol -= 1
if self.lines + 1 > smaxrow:
max_y = self.lines + 1 - smaxrow
else:
max_y = 0
if self.cols + 1 > smaxcol:
max_x = self.cols + 1 - smaxcol
else:
max_x = 0
while True:
for y in range(smaxrow + 1):
y_index = offset_y + y
line = []
for x in range(smaxcol + 1):
x_index = offset_x + x
if len(self.canvas) > y_index and len(self.canvas[y_index]) > x_index:
line.append(self.canvas[y_index][x_index])
else:
line.append(" ")
assert len(line) == (smaxcol + 1)
screen.print_at("".join(line), 0, y)
screen.refresh()
# NOTE: get_event() doesn't block by itself,
# so we have to do the blocking ourselves.
select.select([sys.stdin], [], [], None)
event = screen.get_event()
if not isinstance(event, KeyboardEvent):
continue
k = event.key_code
if k == screen.KEY_DOWN or k == ord("s"):
offset_y += 1
elif k == screen.KEY_PAGE_DOWN or k == ord("S"):
offset_y += smaxrow
elif k == screen.KEY_UP or k == ord("w"):
offset_y -= 1
elif k == screen.KEY_PAGE_UP or k == ord("W"):
offset_y -= smaxrow
elif k == screen.KEY_RIGHT or k == ord("d"):
offset_x += 1
elif k == ord("D"):
offset_x += smaxcol
elif k == screen.KEY_LEFT or k == ord("a"):
offset_x -= 1
elif k == ord("A"):
offset_x -= smaxcol
elif k == ord("q") or k == ord("Q"):
break
if offset_y > max_y:
offset_y = max_y
elif offset_y < 0:
offset_y = 0
if offset_x > max_x:
offset_x = max_x
elif offset_x < 0:
offset_x = 0
|
https://github.com/iterative/dvc/issues/1359
|
λ dvc pipeline show --ascii -v
Error: Traceback (most recent call last):
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\main.py", line 22, in main
ret = cmd.run_cmd()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\command\base.py", line 41, in run_cmd
return self.run()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\command\pipeline.py", line 98, in run
self.args.outs)
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\command\pipeline.py", line 79, in _show_ascii
d.draw()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\dagascii.py", line 278, in draw
canvas.draw()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\dagascii.py", line 23, in draw
Screen.wrapper(self._do_draw)
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\asciimatics\screen.py", line 1170, in wrapper
return func(screen)
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\dagascii.py", line 67, in _do_draw
select.select([sys.stdin], [], [], None)
OSError: [WinError 10038] An operation was attempted on something that is not a socket
Error: Unexpected error: [WinError 10038] An operation was attempted on something that is not a socket
|
OSError
|
def _do_draw(self, screen): # pragma: no cover
from dvc.system import System
from asciimatics.event import KeyboardEvent
offset_x = 0
offset_y = 0
smaxrow, smaxcol = screen.dimensions
assert smaxrow > 1
assert smaxcol > 1
smaxrow -= 1
smaxcol -= 1
if self.lines + 1 > smaxrow:
max_y = self.lines + 1 - smaxrow
else:
max_y = 0
if self.cols + 1 > smaxcol:
max_x = self.cols + 1 - smaxcol
else:
max_x = 0
while True:
for y in range(smaxrow + 1):
y_index = offset_y + y
line = []
for x in range(smaxcol + 1):
x_index = offset_x + x
if len(self.canvas) > y_index and len(self.canvas[y_index]) > x_index:
line.append(self.canvas[y_index][x_index])
else:
line.append(" ")
assert len(line) == (smaxcol + 1)
screen.print_at("".join(line), 0, y)
screen.refresh()
# NOTE: get_event() doesn't block by itself,
# so we have to do the blocking ourselves.
#
# NOTE: using this workaround while waiting for PR [1]
# to get merged and released. After that need to adjust
# asciimatics version requirements.
#
# [1] https://github.com/peterbrittain/asciimatics/pull/188
System.wait_for_input(self.TIMEOUT)
event = screen.get_event()
if not isinstance(event, KeyboardEvent):
continue
k = event.key_code
if k == screen.KEY_DOWN or k == ord("s"):
offset_y += 1
elif k == screen.KEY_PAGE_DOWN or k == ord("S"):
offset_y += smaxrow
elif k == screen.KEY_UP or k == ord("w"):
offset_y -= 1
elif k == screen.KEY_PAGE_UP or k == ord("W"):
offset_y -= smaxrow
elif k == screen.KEY_RIGHT or k == ord("d"):
offset_x += 1
elif k == ord("D"):
offset_x += smaxcol
elif k == screen.KEY_LEFT or k == ord("a"):
offset_x -= 1
elif k == ord("A"):
offset_x -= smaxcol
elif k == ord("q") or k == ord("Q"):
break
if offset_y > max_y:
offset_y = max_y
elif offset_y < 0:
offset_y = 0
if offset_x > max_x:
offset_x = max_x
elif offset_x < 0:
offset_x = 0
|
def _do_draw(self, screen): # pragma: no cover
from asciimatics.event import KeyboardEvent
offset_x = 0
offset_y = 0
smaxrow, smaxcol = screen.dimensions
assert smaxrow > 1
assert smaxcol > 1
smaxrow -= 1
smaxcol -= 1
if self.lines + 1 > smaxrow:
max_y = self.lines + 1 - smaxrow
else:
max_y = 0
if self.cols + 1 > smaxcol:
max_x = self.cols + 1 - smaxcol
else:
max_x = 0
while True:
for y in range(smaxrow + 1):
y_index = offset_y + y
line = []
for x in range(smaxcol + 1):
x_index = offset_x + x
if len(self.canvas) > y_index and len(self.canvas[y_index]) > x_index:
line.append(self.canvas[y_index][x_index])
else:
line.append(" ")
assert len(line) == (smaxcol + 1)
screen.print_at("".join(line), 0, y)
screen.refresh()
# NOTE: get_event() doesn't block by itself,
# so we have to do the blocking ourselves.
#
# NOTE: using formally private method while waiting for PR [1]
# to get merged. After that need to adjust asciimatics version
# requirements.
#
# [1] https://github.com/peterbrittain/asciimatics/pull/188
screen._wait_for_input(self.TIMEOUT)
event = screen.get_event()
if not isinstance(event, KeyboardEvent):
continue
k = event.key_code
if k == screen.KEY_DOWN or k == ord("s"):
offset_y += 1
elif k == screen.KEY_PAGE_DOWN or k == ord("S"):
offset_y += smaxrow
elif k == screen.KEY_UP or k == ord("w"):
offset_y -= 1
elif k == screen.KEY_PAGE_UP or k == ord("W"):
offset_y -= smaxrow
elif k == screen.KEY_RIGHT or k == ord("d"):
offset_x += 1
elif k == ord("D"):
offset_x += smaxcol
elif k == screen.KEY_LEFT or k == ord("a"):
offset_x -= 1
elif k == ord("A"):
offset_x -= smaxcol
elif k == ord("q") or k == ord("Q"):
break
if offset_y > max_y:
offset_y = max_y
elif offset_y < 0:
offset_y = 0
if offset_x > max_x:
offset_x = max_x
elif offset_x < 0:
offset_x = 0
|
https://github.com/iterative/dvc/issues/1359
|
λ dvc pipeline show --ascii -v
Error: Traceback (most recent call last):
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\main.py", line 22, in main
ret = cmd.run_cmd()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\command\base.py", line 41, in run_cmd
return self.run()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\command\pipeline.py", line 98, in run
self.args.outs)
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\command\pipeline.py", line 79, in _show_ascii
d.draw()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\dagascii.py", line 278, in draw
canvas.draw()
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\dagascii.py", line 23, in draw
Screen.wrapper(self._do_draw)
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\asciimatics\screen.py", line 1170, in wrapper
return func(screen)
File "c:\users\klahrichi\appdata\local\continuum\anaconda3\lib\site-packages\dvc\dagascii.py", line 67, in _do_draw
select.select([sys.stdin], [], [], None)
OSError: [WinError 10038] An operation was attempted on something that is not a socket
Error: Unexpected error: [WinError 10038] An operation was attempted on something that is not a socket
|
OSError
|
def load(project, fname):
if not os.path.exists(fname):
Stage._check_dvc_file(fname)
raise StageFileDoesNotExistError(fname)
Stage._check_dvc_filename(fname)
if not Stage.is_stage_file(fname):
Stage._check_dvc_file(fname)
raise StageFileIsNotDvcFileError(fname)
with open(fname, "r") as fd:
return Stage.loadd(project, yaml.safe_load(fd) or dict(), fname)
|
def load(project, fname):
if not os.path.exists(fname):
Stage._check_dvc_file(fname)
raise StageFileDoesNotExistError(fname)
Stage._check_dvc_filename(fname)
if not Stage.is_stage_file(fname):
Stage._check_dvc_file(fname)
raise StageFileIsNotDvcFileError(fname)
with open(fname, "r") as fd:
return Stage.loadd(project, yaml.safe_load(fd), fname)
|
https://github.com/iterative/dvc/issues/1339
|
Error: Traceback (most recent call last):
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/command/run.py", line 18, in run
no_exec=self.args.no_exec)
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/project.py", line 319, in run
self._check_output_duplication(stage.outs)
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/project.py", line 155, in _check_out
put_duplication
for stage in self.stages():
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/project.py", line 1106, in stages
stage = Stage.load(self, path)
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/stage.py", line 366, in load
return Stage.loadd(project, yaml.safe_load(fd), fname)
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/stage.py", line 243, in loadd
Stage.validate(d, fname=os.path.relpath(path))
File "/home/efiop/.virtualenvs/dvc/lib/python2.7/site-packages/dvc/stage.py", line 239, in validate
raise StageFileFormatError(fname, exc)
StageFileFormatError: Stage file 'Dvcfile' format error: None should be instance of 'dict'
Error: Failed to run command: Stage file 'Dvcfile' format error: None should be instance of 'dict'
Having any troubles? Hit us up at dvc.org/support, we are always happy to help!
|
StageFileFormatError
|
def hadoop_fs(self, cmd, user=None):
cmd = "hadoop fs -" + cmd
if user:
cmd = "HADOOP_USER_NAME={} ".format(user) + cmd
# NOTE: close_fds doesn't work with redirected stdin/stdout/stderr.
# See https://github.com/iterative/dvc/issues/1197.
close_fds = os.name != "nt"
executable = os.getenv("SHELL") if os.name != "nt" else None
p = Popen(
cmd,
shell=True,
close_fds=close_fds,
executable=executable,
env=fix_env(os.environ),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise DvcException("HDFS command failed: {}: {}".format(cmd, err))
return out.decode("utf-8")
|
def hadoop_fs(self, cmd, user=None):
cmd = "hadoop fs -" + cmd
if user:
cmd = "HADOOP_USER_NAME={} ".format(user) + cmd
# NOTE: close_fds doesn't work with redirected stdin/stdout/stderr.
# See https://github.com/iterative/dvc/issues/1197.
close_fds = os.name != "nt"
p = Popen(
cmd,
shell=True,
close_fds=close_fds,
executable=os.getenv("SHELL"),
env=fix_env(os.environ),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise DvcException("HDFS command failed: {}: {}".format(cmd, err))
return out.decode("utf-8")
|
https://github.com/iterative/dvc/issues/1238
|
$ dvc run -v -d data/Posts.xml.tgz -o data/Posts.xml tar zxf data/Posts.xml.tgz -C data/
Debug: updater is not old enough to check for updates
Debug: PRAGMA user_version;
Debug: fetched: [(2,)]
Debug: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
Debug: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
Debug: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
Debug: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
Debug: PRAGMA user_version = 2;
Running command:
tar zxf data/Posts.xml.tgz -C data/
/c: /c: Is a directory
Debug: SELECT count from state_info WHERE rowid=1
Debug: fetched: [(1,)]
Debug: UPDATE state_info SET count = 1 WHERE rowid = 1
Error: Traceback (most recent call last):
File "dvc\command\run.py", line 18, in run
File "dvc\project.py", line 265, in run
File "dvc\stage.py", line 435, in run
StageCmdFailedError: Stage 'Posts.xml.dvc' cmd tar zxf data/Posts.xml.tgz -C data/ failed
Error: Failed to run command: Stage 'Posts.xml.dvc' cmd tar zxf data/Posts.xml.tgz -C data/ failed
|
StageCmdFailedError
|
def run(self, dry=False):
if self.locked:
msg = "Verifying outputs in locked stage '{}'"
self.project.logger.info(msg.format(self.relpath))
if not dry:
self.check_missing_outputs()
elif self.is_import:
msg = "Importing '{}' -> '{}'"
self.project.logger.info(msg.format(self.deps[0].path, self.outs[0].path))
if not dry:
self.deps[0].download(self.outs[0].path_info)
elif self.is_data_source:
msg = "Verifying data sources in '{}'".format(self.relpath)
self.project.logger.info(msg)
if not dry:
self.check_missing_outputs()
else:
msg = "Running command:\n\t{}".format(self.cmd)
self.project.logger.info(msg)
if not dry:
self._check_missing_deps()
executable = os.getenv("SHELL") if os.name != "nt" else None
p = subprocess.Popen(
self.cmd,
cwd=self.cwd,
shell=True,
env=fix_env(os.environ),
executable=executable,
)
p.communicate()
if p.returncode != 0:
raise StageCmdFailedError(self)
if not dry:
self.save()
|
def run(self, dry=False):
if self.locked:
msg = "Verifying outputs in locked stage '{}'"
self.project.logger.info(msg.format(self.relpath))
if not dry:
self.check_missing_outputs()
elif self.is_import:
msg = "Importing '{}' -> '{}'"
self.project.logger.info(msg.format(self.deps[0].path, self.outs[0].path))
if not dry:
self.deps[0].download(self.outs[0].path_info)
elif self.is_data_source:
msg = "Verifying data sources in '{}'".format(self.relpath)
self.project.logger.info(msg)
if not dry:
self.check_missing_outputs()
else:
msg = "Running command:\n\t{}".format(self.cmd)
self.project.logger.info(msg)
if not dry:
self._check_missing_deps()
p = subprocess.Popen(
self.cmd,
cwd=self.cwd,
shell=True,
env=fix_env(os.environ),
executable=os.getenv("SHELL"),
)
p.communicate()
if p.returncode != 0:
raise StageCmdFailedError(self)
if not dry:
self.save()
|
https://github.com/iterative/dvc/issues/1238
|
$ dvc run -v -d data/Posts.xml.tgz -o data/Posts.xml tar zxf data/Posts.xml.tgz -C data/
Debug: updater is not old enough to check for updates
Debug: PRAGMA user_version;
Debug: fetched: [(2,)]
Debug: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)
Debug: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)
Debug: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)
Debug: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)
Debug: PRAGMA user_version = 2;
Running command:
tar zxf data/Posts.xml.tgz -C data/
/c: /c: Is a directory
Debug: SELECT count from state_info WHERE rowid=1
Debug: fetched: [(1,)]
Debug: UPDATE state_info SET count = 1 WHERE rowid = 1
Error: Traceback (most recent call last):
File "dvc\command\run.py", line 18, in run
File "dvc\project.py", line 265, in run
File "dvc\stage.py", line 435, in run
StageCmdFailedError: Stage 'Posts.xml.dvc' cmd tar zxf data/Posts.xml.tgz -C data/ failed
Error: Failed to run command: Stage 'Posts.xml.dvc' cmd tar zxf data/Posts.xml.tgz -C data/ failed
|
StageCmdFailedError
|
def hadoop_fs(self, cmd, user=None):
cmd = "hadoop fs -" + cmd
if user:
cmd = "HADOOP_USER_NAME={} ".format(user) + cmd
p = Popen(
cmd,
shell=True,
close_fds=True,
executable=os.getenv("SHELL"),
env=fix_env(os.environ),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise DvcException("HDFS command failed: {}: {}".format(cmd, err))
return out.decode("utf-8")
|
def hadoop_fs(self, cmd, user=None):
cmd = "hadoop fs -" + cmd
if user:
cmd = "HADOOP_USER_NAME={} ".format(user) + cmd
p = Popen(
cmd,
shell=True,
close_fds=True,
executable=os.getenv("SHELL"),
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
out, err = p.communicate()
if p.returncode != 0:
raise DvcException("HDFS command failed: {}: {}".format(cmd, err))
return out.decode("utf-8")
|
https://github.com/iterative/dvc/issues/965
|
Running command:
python3 test.py
/bin/bash: /tmp/_MEIACeYFM/libtinfo.so.5: no version information available (required by /bin/bash)
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "/usr/lib/python3.5/imp.py", line 242, in load_module
return load_dynamic(name, filename, file)
File "/usr/lib/python3.5/imp.py", line 342, in load_dynamic
return _load(spec)
ImportError: /tmp/_MEIACeYFM/libstdc++.so.6: version `GLIBCXX_3.4.20' not found (required by /usr/local/lib/python3.5/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 1, in <module>
import tensorflow as tf
File "/usr/local/lib/python3.5/dist-packages/tensorflow/__init__.py", line 22, in <module>
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/__init__.py", line 49, in <module>
from tensorflow.python import pywrap_tensorflow
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 74, in <module>
raise ImportError(msg)
ImportError: Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "/usr/lib/python3.5/imp.py", line 242, in load_module
return load_dynamic(name, filename, file)
File "/usr/lib/python3.5/imp.py", line 342, in load_dynamic
return _load(spec)
ImportError: /tmp/_MEIACeYFM/libstdc++.so.6: version `GLIBCXX_3.4.20' not found (required by /usr/local/lib/python3.5/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so)
Failed to load the native TensorFlow runtime.
See https://www.tensorflow.org/install/install_sources#common_installation_problems
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.
Failed to run command: Stage 'Dvcfile' cmd python3 test.py failed
|
ImportError
|
def run(self, dry=False):
if self.locked:
msg = "Verifying outputs in locked stage '{}'"
self.project.logger.info(msg.format(self.relpath))
if not dry:
self.check_missing_outputs()
elif self.is_import:
msg = "Importing '{}' -> '{}'"
self.project.logger.info(msg.format(self.deps[0].path, self.outs[0].path))
if not dry:
self.deps[0].download(self.outs[0].path_info)
elif self.is_data_source:
msg = "Verifying data sources in '{}'".format(self.relpath)
self.project.logger.info(msg)
if not dry:
self.check_missing_outputs()
else:
msg = "Running command:\n\t{}".format(self.cmd)
self.project.logger.info(msg)
if not dry:
p = subprocess.Popen(
self.cmd,
cwd=self.cwd,
shell=True,
env=fix_env(os.environ),
executable=os.getenv("SHELL"),
)
p.communicate()
if p.returncode != 0:
raise StageCmdFailedError(self)
if not dry:
self.save()
|
def run(self, dry=False):
if self.locked:
msg = "Verifying outputs in locked stage '{}'"
self.project.logger.info(msg.format(self.relpath))
if not dry:
self.check_missing_outputs()
elif self.is_import:
msg = "Importing '{}' -> '{}'"
self.project.logger.info(msg.format(self.deps[0].path, self.outs[0].path))
if not dry:
self.deps[0].download(self.outs[0].path_info)
elif self.is_data_source:
msg = "Verifying data sources in '{}'".format(self.relpath)
self.project.logger.info(msg)
if not dry:
self.check_missing_outputs()
else:
msg = "Running command:\n\t{}".format(self.cmd)
self.project.logger.info(msg)
if not dry:
p = subprocess.Popen(
self.cmd,
cwd=self.cwd,
shell=True,
env=os.environ,
executable=os.getenv("SHELL"),
)
p.communicate()
if p.returncode != 0:
raise StageCmdFailedError(self)
if not dry:
self.save()
|
https://github.com/iterative/dvc/issues/965
|
Running command:
python3 test.py
/bin/bash: /tmp/_MEIACeYFM/libtinfo.so.5: no version information available (required by /bin/bash)
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "/usr/lib/python3.5/imp.py", line 242, in load_module
return load_dynamic(name, filename, file)
File "/usr/lib/python3.5/imp.py", line 342, in load_dynamic
return _load(spec)
ImportError: /tmp/_MEIACeYFM/libstdc++.so.6: version `GLIBCXX_3.4.20' not found (required by /usr/local/lib/python3.5/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 1, in <module>
import tensorflow as tf
File "/usr/local/lib/python3.5/dist-packages/tensorflow/__init__.py", line 22, in <module>
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/__init__.py", line 49, in <module>
from tensorflow.python import pywrap_tensorflow
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 74, in <module>
raise ImportError(msg)
ImportError: Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
from tensorflow.python.pywrap_tensorflow_internal import *
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
_pywrap_tensorflow_internal = swig_import_helper()
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
_mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
File "/usr/lib/python3.5/imp.py", line 242, in load_module
return load_dynamic(name, filename, file)
File "/usr/lib/python3.5/imp.py", line 342, in load_dynamic
return _load(spec)
ImportError: /tmp/_MEIACeYFM/libstdc++.so.6: version `GLIBCXX_3.4.20' not found (required by /usr/local/lib/python3.5/dist-packages/tensorflow/python/_pywrap_tensorflow_internal.so)
Failed to load the native TensorFlow runtime.
See https://www.tensorflow.org/install/install_sources#common_installation_problems
for some common reasons and solutions. Include the entire stack trace
above this error message when asking for help.
Failed to run command: Stage 'Dvcfile' cmd python3 test.py failed
|
ImportError
|
def target_metric_from_git_history(self, hash, symlink_content, target, settings):
cache_rel_to_data = os.path.relpath(
settings.config.cache_dir, settings.config.data_dir
)
common_prefix = os.path.commonprefix([symlink_content, cache_rel_to_data])
cache_file_name = symlink_content[len(common_prefix) :]
if cache_file_name[0] == os.path.sep:
cache_file_name = cache_file_name[1:]
file_name = os.path.join(settings.config.cache_dir, cache_file_name)
full_file_name = os.path.join(self.git_dir_abs, file_name)
if not os.path.exists(full_file_name):
return None
lines = open(full_file_name).readlines(2)
if len(lines) != 1:
msg = "[dvc-git] Target file {} with hash {} has wrong format: {} lines were obtained, 1 expected."
Logger.warn(msg.format(target, hash, len(lines)))
return None
# Extract float from string. I.e. from 'AUC: 0.596182'
num = re.findall("\d+\.\d+", lines[0])
if len(num) != 1:
msg = "[dvc-git] Unable to parse metrics from '{}' file {}"
Logger.warn(msg.format(lines[0], target))
return None
return float(num[0])
|
def target_metric_from_git_history(self, hash, symlink_content, target, settings):
cache_rel_to_data = os.path.relpath(
settings.config.cache_dir, settings.config.data_dir
)
common_prefix = os.path.commonprefix([symlink_content, cache_rel_to_data])
cache_file_name = symlink_content[len(common_prefix) :]
if cache_file_name[0] == os.path.sep:
cache_file_name = cache_file_name[1:]
file_name = os.path.join(settings.config.cache_dir, cache_file_name)
full_file_name = os.path.join(self.git_dir_abs, file_name)
if os.path.exists(full_file_name):
lines = open(full_file_name).readlines(2)
if len(lines) != 1:
msg = "[dvc-git] Target file {} with hash {} has wrong format: {} lines were obtained, 1 expected."
Logger.warn(msg.format(target, hash, len(lines)))
else:
return float(lines[0])
return None
|
https://github.com/iterative/dvc/issues/217
|
(dvc) ➜ myrepo git:(master) dvc show workflow data/evaluation.txtException caught in CmdShowWorkflow
Traceback (most recent call last):
File "/home/efiop/virtenvs/dvc/lib/python2.7/site-packages/dvc/main.py", line 18, in main
ret = instance.run()
File "/home/efiop/virtenvs/dvc/lib/python2.7/site-packages/dvc/command/show_workflow.py", line 17, in run
wf = self.git.get_all_commits(target, self.settings)
File "/home/efiop/virtenvs/dvc/lib/python2.7/site-packages/dvc/git_wrapper.py", line 258, in get_all_commits
branch_tips=branches_multimap.get(hash))
File "/home/efiop/virtenvs/dvc/lib/python2.7/site-packages/dvc/git_wrapper.py", line 272, in is_target
metric = self.target_metric_from_git_history(hash, symlink_content, target, settings)
File "/home/efiop/virtenvs/dvc/lib/python2.7/site-packages/dvc/git_wrapper.py", line 318, in target_metric_from_git_history
return float(lines[0])
ValueError: could not convert string to float: AUC: 0.596182
|
ValueError
|
def parse_args(argv=None):
# Common args
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-q", "--quiet", action="store_true", default=False, help="Be quiet."
)
parent_parser.add_argument(
"-v", "--verbose", action="store_true", default=False, help="Be verbose."
)
parent_parser.add_argument(
"-G",
"--no-git-actions",
action="store_true",
default=False,
help="Skip all git actions including reproducibility check and commits.",
)
desc = "Data Version Control"
parser = argparse.ArgumentParser(
description=desc,
parents=[parent_parser],
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"-V", "--version", action="version", version="%(prog)s " + VERSION
)
# Sub commands
subparsers = parser.add_subparsers(
dest="cmd", help="Use dvc CMD --help for command-specific help"
)
# Init
init_parser = subparsers.add_parser(
"init",
parents=[parent_parser],
help="Initialize dvc over a directory (should already be a git dir).",
)
init_parser.add_argument("--data-dir", default="data", help="Data directory.")
init_parser.add_argument("--cache-dir", default=".cache", help="Cache directory.")
init_parser.add_argument("--state-dir", default=".state", help="State directory.")
init_parser.add_argument(
"--target-file", default=Config.TARGET_FILE_DEFAULT, help="Target file."
)
init_parser.set_defaults(func=CmdInit)
# Run
run_parser = subparsers.add_parser(
"run", parents=[parent_parser], help="Run command"
)
run_parser.add_argument("--stdout", help="Output std output to a file.")
run_parser.add_argument("--stderr", help="Output std error to a file.")
run_parser.add_argument(
"-i",
"--input",
action="append",
help="Declare input data items for reproducible cmd.",
)
run_parser.add_argument(
"-o",
"--output",
action="append",
help="Declare output data items for reproducible cmd.",
)
run_parser.add_argument(
"-c",
"--code",
action="append",
help="Code dependencies which produce the output.",
)
run_parser.add_argument(
"--shell", action="store_true", default=False, help="Shell command"
)
run_parser.add_argument(
"-l",
"--lock",
action="store_true",
default=False,
help="Lock data item - disable reproduction.",
)
run_parser.add_argument("command", help="Command to execute")
run_parser.add_argument(
"args", nargs=argparse.REMAINDER, help="Arguments of a command"
)
run_parser.set_defaults(func=CmdRun)
# Sync
sync_parser = subparsers.add_parser(
"sync",
parents=[parent_parser],
help="Synchronize data file with cloud (cloud settings already setup.",
)
sync_parser.add_argument("targets", nargs="+", help="File or directory to sync.")
sync_parser.add_argument(
"-j",
"--jobs",
type=int,
default=cpu_count(),
help="Number of jobs to run simultaneously.",
)
sync_parser.set_defaults(func=CmdDataSync)
# Repro
repro_parser = subparsers.add_parser(
"repro", parents=[parent_parser], help="Reproduce data"
)
repro_parser.add_argument("target", nargs="*", help="Data items to reproduce.")
repro_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Reproduce even if dependencies were not changed.",
)
repro_parser.add_argument(
"-s",
"--single-item",
action="store_true",
default=False,
help="Reproduce only single data item without recursive dependencies check.",
)
repro_parser.set_defaults(func=CmdRepro)
# Remove
remove_parser = subparsers.add_parser(
"remove", parents=[parent_parser], help="Remove data item from data directory."
)
remove_parser.add_argument(
"target", nargs="*", help="Target to remove - file or directory."
)
remove_parser.add_argument(
"-l",
"--keep-in-cloud",
action="store_true",
default=False,
help="Do not remove data from cloud.",
)
remove_parser.add_argument(
"-r", "--recursive", action="store_true", help="Remove directory recursively."
)
remove_parser.add_argument(
"-c",
"--keep-in-cache",
action="store_true",
default=False,
help="Do not remove data from cache.",
)
remove_parser.set_defaults(func=CmdRemove)
# Import
import_parser = subparsers.add_parser(
"import", parents=[parent_parser], help="Import file to data directory."
)
import_parser.add_argument("input", nargs="+", help="Input file/files.")
import_parser.add_argument("output", help="Output file/directory.")
import_parser.add_argument(
"-l",
"--lock",
action="store_true",
default=False,
help="Lock data item - disable reproduction.",
)
import_parser.add_argument(
"-j",
"--jobs",
type=int,
default=cpu_count(),
help="Number of jobs to run simultaneously.",
)
import_parser.set_defaults(func=CmdImportFile)
# Lock
lock_parser = subparsers.add_parser("lock", parents=[parent_parser], help="Lock")
lock_parser.add_argument(
"-l",
"--lock",
action="store_true",
default=False,
help="Lock data item - disable reproduction.",
)
lock_parser.add_argument(
"-u",
"--unlock",
action="store_true",
default=False,
help="Unlock data item - enable reproduction.",
)
lock_parser.add_argument("files", nargs="*", help="Data items to lock or unlock.")
lock_parser.set_defaults(func=CmdLock)
# Garbage collector
gc_parser = subparsers.add_parser(
"gc", parents=[parent_parser], help="Collect garbage"
)
gc_parser.add_argument(
"target", nargs="*", help="Target to remove - file or directory."
)
gc_parser.add_argument(
"-l",
"--keep-in-cloud",
action="store_true",
default=False,
help="Do not remove data from cloud.",
)
gc_parser.add_argument(
"-r", "--recursive", action="store_true", help="Remove directory recursively."
)
gc_parser.add_argument(
"-c",
"--keep-in-cache",
action="store_false",
default=False,
help="Do not remove data from cache.",
)
gc_parser.set_defaults(func=CmdGC)
# Target
target_parser = subparsers.add_parser(
"target", parents=[parent_parser], help="Set default target"
)
target_parser.add_argument("target_file", nargs="?", help="Target data item.")
target_parser.add_argument(
"-u", "--unset", action="store_true", default=False, help="Reset target."
)
target_parser.set_defaults(func=CmdTarget)
if isinstance(argv, str):
argv = argv.split()
return parser.parse_args(argv)
|
def parse_args(argv=None):
# Common args
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument(
"-q", "--quiet", action="store_true", default=False, help="Be quiet."
)
parent_parser.add_argument(
"-v", "--verbose", action="store_true", default=False, help="Be verbose."
)
parent_parser.add_argument(
"-G",
"--no-git-actions",
action="store_true",
default=False,
help="Skip all git actions including reproducibility check and commits.",
)
desc = "Data Version Control"
parser = argparse.ArgumentParser(
description=desc,
parents=[parent_parser],
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"-V", "--version", action="version", version="%(prog)s " + VERSION
)
# Sub commands
subparsers = parser.add_subparsers(
dest="cmd", help="Use dvc CMD --help for command-specific help"
)
# Init
init_parser = subparsers.add_parser(
"init",
parents=[parent_parser],
help="Initialize dvc over a directory (should already be a git dir).",
)
init_parser.add_argument("--data-dir", default="data", help="Data directory.")
init_parser.add_argument("--cache-dir", default=".cache", help="Cache directory.")
init_parser.add_argument("--state-dir", default=".state", help="State directory.")
init_parser.add_argument(
"--target-file", default=Config.TARGET_FILE_DEFAULT, help="Target file."
)
init_parser.set_defaults(func=CmdInit)
# Run
run_parser = subparsers.add_parser(
"run", parents=[parent_parser], help="Run command"
)
run_parser.add_argument("--stdout", help="Output std output to a file.")
run_parser.add_argument("--stderr", help="Output std error to a file.")
run_parser.add_argument(
"-i",
"--input",
action="append",
help="Declare input data items for reproducible cmd.",
)
run_parser.add_argument(
"-o",
"--output",
action="append",
help="Declare output data items for reproducible cmd.",
)
run_parser.add_argument(
"-c",
"--code",
action="append",
help="Code dependencies which produce the output.",
)
run_parser.add_argument(
"--shell", action="store_true", default=False, help="Shell command"
)
run_parser.add_argument(
"-l",
"--lock",
action="store_true",
default=False,
help="Lock data item - disable reproduction.",
)
run_parser.add_argument(
"command", nargs=argparse.REMAINDER, help="Command to execute"
)
run_parser.set_defaults(func=CmdRun)
# Sync
sync_parser = subparsers.add_parser(
"sync",
parents=[parent_parser],
help="Synchronize data file with cloud (cloud settings already setup.",
)
sync_parser.add_argument(
"targets", metavar="", nargs="*", help="File or directory to sync."
)
sync_parser.add_argument(
"-j",
"--jobs",
type=int,
default=cpu_count(),
help="Number of jobs to run simultaneously.",
)
sync_parser.set_defaults(func=CmdDataSync)
# Repro
repro_parser = subparsers.add_parser(
"repro", parents=[parent_parser], help="Reproduce data"
)
repro_parser.add_argument(
"target", metavar="", nargs="*", help="Data items to reproduce."
)
repro_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Reproduce even if dependencies were not changed.",
)
repro_parser.add_argument(
"-s",
"--single-item",
action="store_true",
default=False,
help="Reproduce only single data item without recursive dependencies check.",
)
repro_parser.set_defaults(func=CmdRun)
# Remove
remove_parser = subparsers.add_parser(
"remove", parents=[parent_parser], help="Remove data item from data directory."
)
remove_parser.add_argument(
"target", metavar="", nargs="*", help="Target to remove - file or directory."
)
remove_parser.add_argument(
"-l",
"--keep-in-cloud",
action="store_true",
default=False,
help="Do not remove data from cloud.",
)
remove_parser.add_argument(
"-r", "--recursive", action="store_true", help="Remove directory recursively."
)
remove_parser.add_argument(
"-c",
"--keep-in-cache",
action="store_true",
default=False,
help="Do not remove data from cache.",
)
remove_parser.set_defaults(func=CmdRemove)
# Import
import_parser = subparsers.add_parser(
"import", parents=[parent_parser], help="Import file to data directory."
)
import_parser.add_argument("input", nargs="+", help="Input file/files.")
import_parser.add_argument("output", help="Output file/directory.")
import_parser.add_argument(
"-l",
"--lock",
action="store_true",
default=False,
help="Lock data item - disable reproduction.",
)
import_parser.add_argument(
"-j",
"--jobs",
type=int,
default=cpu_count(),
help="Number of jobs to run simultaneously.",
)
import_parser.set_defaults(func=CmdImportFile)
# Lock
lock_parser = subparsers.add_parser("lock", parents=[parent_parser], help="Lock")
lock_parser.add_argument(
"-l",
"--lock",
action="store_true",
default=False,
help="Lock data item - disable reproduction.",
)
lock_parser.add_argument(
"-u",
"--unlock",
action="store_true",
default=False,
help="Unlock data item - enable reproduction.",
)
lock_parser.add_argument(
"files", metavar="", nargs="*", help="Data items to lock or unlock."
)
lock_parser.set_defaults(func=CmdLock)
# Garbage collector
gc_parser = subparsers.add_parser(
"gc", parents=[parent_parser], help="Collect garbage"
)
gc_parser.add_argument(
"target", metavar="", nargs="*", help="Target to remove - file or directory."
)
gc_parser.add_argument(
"-l",
"--keep-in-cloud",
action="store_true",
default=False,
help="Do not remove data from cloud.",
)
gc_parser.add_argument(
"-r", "--recursive", action="store_true", help="Remove directory recursively."
)
gc_parser.add_argument(
"-c",
"--keep-in-cache",
action="store_false",
default=False,
help="Do not remove data from cache.",
)
gc_parser.set_defaults(func=CmdGC)
# Target
target_parser = subparsers.add_parser(
"target", parents=[parent_parser], help="Set default target"
)
target_parser.add_argument(
"target_file", metavar="", nargs="?", help="Target data item."
)
target_parser.add_argument(
"-u", "--unset", action="store_true", default=False, help="Reset target."
)
target_parser.set_defaults(func=CmdTarget)
if isinstance(argv, str):
argv = argv.split()
return parser.parse_args(argv)
|
https://github.com/iterative/dvc/issues/136
|
$ dvc sync
Traceback (most recent call last):
File "/Users/dmitry/src/dvc/dvc.py", line 4, in <module>
main()
File "/Users/dmitry/src/dvc/dvc/main.py", line 11, in main
sys.exit(instance.run())
File "/Users/dmitry/src/dvc/dvc/command/data_sync.py", line 26, in run
raise DataSyncError('Sync target is not specified')
dvc.command.data_sync.DataSyncError: Data sync error: Sync target is not specified
|
dvc.command.data_sync.DataSyncError
|
def run(self):
with DvcLock(self.is_locker, self.git):
cmd = [self.parsed_args.command] + self.parsed_args.args
data_items_from_args, not_data_items_from_args = self.argv_files_by_type(cmd)
return self.run_and_commit_if_needed(
cmd,
data_items_from_args,
not_data_items_from_args,
self.parsed_args.stdout,
self.parsed_args.stderr,
self.parsed_args.shell,
)
pass
|
def run(self):
with DvcLock(self.is_locker, self.git):
data_items_from_args, not_data_items_from_args = self.argv_files_by_type(
self.parsed_args.command
)
return self.run_and_commit_if_needed(
self.parsed_args.command,
data_items_from_args,
not_data_items_from_args,
self.parsed_args.stdout,
self.parsed_args.stderr,
self.parsed_args.shell,
)
pass
|
https://github.com/iterative/dvc/issues/136
|
$ dvc sync
Traceback (most recent call last):
File "/Users/dmitry/src/dvc/dvc.py", line 4, in <module>
main()
File "/Users/dmitry/src/dvc/dvc/main.py", line 11, in main
sys.exit(instance.run())
File "/Users/dmitry/src/dvc/dvc/command/data_sync.py", line 26, in run
raise DataSyncError('Sync target is not specified')
dvc.command.data_sync.DataSyncError: Data sync error: Sync target is not specified
|
dvc.command.data_sync.DataSyncError
|
def run(self):
if not self.skip_git_actions and not self.git.is_ready_to_go():
return 1
data_dir_path = self.get_not_existing_dir(self.parsed_args.data_dir)
cache_dir_path = self.get_not_existing_dir(self.parsed_args.cache_dir)
state_dir_path = self.get_not_existing_dir(self.parsed_args.state_dir)
conf_file_name = self.get_not_existing_conf_file_name()
data_dir_path.mkdir()
cache_dir_path.mkdir()
state_dir_path.mkdir()
Logger.info(
"Directories {}/, {}/ and {}/ were created".format(
data_dir_path.name, cache_dir_path.name, state_dir_path.name
)
)
data_empty_file = os.path.join(self.parsed_args.data_dir, self.EMPTY_FILE_NAME)
cache_empty_file = os.path.join(self.parsed_args.cache_dir, self.EMPTY_FILE_NAME)
state_empty_file = os.path.join(self.parsed_args.state_dir, self.EMPTY_FILE_NAME)
open(data_empty_file, "w").close()
open(cache_empty_file, "w").close()
open(state_empty_file, "w").close()
Logger.info(
"Empty files {}, {} and {} were created".format(
data_empty_file, cache_empty_file, state_empty_file
)
)
conf_file = open(conf_file_name, "wt")
conf_file.write(
self.CONFIG_TEMPLATE.format(
data_dir_path.name, cache_dir_path.name, state_dir_path.name
)
)
conf_file.close()
message = "DVC init. data dir {}, cache dir {}, state dir {}".format(
data_dir_path.name, cache_dir_path.name, state_dir_path.name
)
if self.commit_if_needed(message) == 1:
return 1
self.modify_gitignore(cache_dir_path.name)
return self.commit_if_needed("DVC init. Commit .gitignore file")
|
def run(self):
if not self.skip_git_actions and not self.git.is_ready_to_go():
return 1
data_dir_path = self.get_not_existing_dir(self.parsed_args.data_dir)
cache_dir_path = self.get_not_existing_dir(self.parsed_args.cache_dir)
state_dir_path = self.get_not_existing_dir(self.parsed_args.state_dir)
conf_file_name = self.get_not_existing_conf_file_name()
data_dir_path.mkdir()
cache_dir_path.mkdir()
state_dir_path.mkdir()
Logger.info(
"Directories {}/, {}/ and {}/ were created".format(
data_dir_path.name, cache_dir_path.name, state_dir_path.name
)
)
conf_file = open(conf_file_name, "wt")
conf_file.write(
self.CONFIG_TEMPLATE.format(
data_dir_path.name, cache_dir_path.name, state_dir_path.name
)
)
conf_file.close()
self.modify_gitignore(cache_dir_path.name)
message = "DVC init. data dir {}, cache dir {}, state dir {}".format(
data_dir_path.name, cache_dir_path.name, state_dir_path.name
)
return self.commit_if_needed(message)
|
https://github.com/iterative/dvc/issues/51
|
$ dvc data-sync data/summary.txt
Downloading cache file from S3 "nlx-shared/tag_classifier/.cache/summary.txt_6405e23"
Traceback (most recent call last):
File "/Users/dmitry/src/dvc/dvc2.py", line 77, in <module>
Runtime.run(CmdDataSync, args_start_loc=argv_offset)
File "/Users/dmitry/src/dvc/dvc/runtime.py", line 45, in run
sys.exit(instance.run())
File "/Users/dmitry/src/dvc/dvc/command/data_sync.py", line 60, in run
return self.sync_symlink(data_item)
File "/Users/dmitry/src/dvc/dvc/command/data_sync.py", line 84, in sync_symlink
self.sync_from_cloud(data_item)
File "/Users/dmitry/src/dvc/dvc/command/data_sync.py", line 184, in sync_from_cloud
return self._sync_from_cloud_aws(item)
File "/Users/dmitry/src/dvc/dvc/command/data_sync.py", line 106, in _sync_from_cloud_aws
key.get_contents_to_filename(item.resolved_cache.relative, cb=percent_cb)
File "/usr/local/lib/python2.7/site-packages/boto/s3/key.py", line 1714, in get_contents_to_filename
os.remove(filename)
OSError: [Errno 2] No such file or directory: '.cache/summary.txt_6405e23'
|
OSError
|
def cleanup(self):
"""Delete expired meta-data."""
if not self.expires:
return
self.collection.delete_many(
{"date_done": {"$lt": self.app.now() - self.expires_delta}},
)
self.group_collection.delete_many(
{"date_done": {"$lt": self.app.now() - self.expires_delta}},
)
|
def cleanup(self):
"""Delete expired meta-data."""
self.collection.delete_many(
{"date_done": {"$lt": self.app.now() - self.expires_delta}},
)
self.group_collection.delete_many(
{"date_done": {"$lt": self.app.now() - self.expires_delta}},
)
|
https://github.com/celery/celery/issues/6450
|
Task celery.backend_cleanup[75aa0973-534f-4812-a992-1ead4086f58c] raised unexpected: TypeError('unsupported type for timedelta seconds component: NoneType')
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/kombu/utils/objects.py", line 42, in __get__
return obj.__dict__[self.__name__]
KeyError: 'expires_delta'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/celery/app/builtins.py", line 25, in backend_cleanup
app.backend.cleanup()
File "/usr/local/lib/python3.9/site-packages/celery/backends/mongodb.py", line 258, in cleanup
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
File "/usr/local/lib/python3.9/site-packages/kombu/utils/objects.py", line 44, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/usr/local/lib/python3.9/site-packages/celery/backends/mongodb.py", line 312, in expires_delta
return timedelta(seconds=self.expires)
TypeError: unsupported type for timedelta seconds component: NoneType
|
KeyError
|
def from_dict(cls, d, app=None):
# We need to mutate the `kwargs` element in place to avoid confusing
# `freeze()` implementations which end up here and expect to be able to
# access elements from that dictionary later and refer to objects
# canonicalized here
orig_tasks = d["kwargs"]["tasks"]
d["kwargs"]["tasks"] = rebuilt_tasks = type(orig_tasks)(
(maybe_signature(task, app=app) for task in orig_tasks)
)
return _upgrade(
d,
group(rebuilt_tasks, app=app, **d["options"]),
)
|
def from_dict(cls, d, app=None):
return _upgrade(
d,
group(d["kwargs"]["tasks"], app=app, **d["options"]),
)
|
https://github.com/celery/celery/issues/6341
|
[2020-09-08 12:44:05,453: DEBUG/MainProcess] Task accepted: app.replace_with[dcea02fd-23a3-404a-9fdd-b213eb51c0d1] pid:453431
[2020-09-08 12:44:05,457: ERROR/ForkPoolWorker-8] Task app.replace_with[dcea02fd-23a3-404a-9fdd-b213eb51c0d1] raised unexpected: AttributeError("'dict' object has no attribute '_app'")
Traceback (most recent call last):
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/kombu/utils/objects.py", line 41, in __get__
return obj.__dict__[self.__name__]
KeyError: 'app'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/app/trace.py", line 409, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/app/trace.py", line 701, in __protected_call__
return self.run(*args, **kwargs)
File "/home/maybe/tmp/capp/app.py", line 13, in replace_with
raise self.replace(sig)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/app/task.py", line 894, in replace
sig.freeze(self.request.id)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/canvas.py", line 1302, in freeze
self.tasks = group(self.tasks, app=self.app)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/kombu/utils/objects.py", line 43, in __get__
value = obj.__dict__[self.__name__] = self.__get(obj)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/canvas.py", line 1456, in app
return self._get_app(self.body)
File "/home/maybe/tmp/capp/venv/lib64/python3.8/site-packages/celery/canvas.py", line 1466, in _get_app
app = tasks[0]._app
AttributeError: 'dict' object has no attribute '_app'
|
KeyError
|
def convert(self, value, param, ctx):
return text.str_to_list(value)
|
def convert(self, value, param, ctx):
return set(text.str_to_list(value))
|
https://github.com/celery/celery/issues/6399
|
Traceback (most recent call last):
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/bin/celery", line 8, in <module>
sys.exit(main())
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/celery/__main__.py", line 15, in main
sys.exit(_main())
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/celery/bin/celery.py", line 150, in main
return celery(auto_envvar_prefix="CELERY")
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/click/decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/celery/bin/control.py", line 125, in inspect
replies = ctx.obj.app.control.inspect(timeout=timeout,
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/celery/app/control.py", line 97, in _request
return self._prepare(self.app.control.broadcast(
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/celery/app/control.py", line 477, in broadcast
return self.mailbox(conn)._broadcast(
File "/Users/aszubarev/Library/Caches/pypoetry/virtualenvs/service-notification-py-xHAuYcng-py3.8/lib/python3.8/site-packages/kombu/pidbox.py", line 319, in _broadcast
raise ValueError(
ValueError: destination must be a list/tuple not <class 'set'>
Sentry is attempting to send 0 pending error messages
Waiting up to 2 seconds
Press Ctrl-C to quit
|
ValueError
|
def prepare_models(self, engine):
if not self.prepared:
# SQLAlchemy will check if the items exist before trying to
# create them, which is a race condition. If it raises an error
# in one iteration, the next may pass all the existence checks
# and the call will succeed.
retries = 0
while True:
try:
ResultModelBase.metadata.create_all(engine)
except DatabaseError:
if retries < PREPARE_MODELS_MAX_RETRIES:
sleep_amount_ms = get_exponential_backoff_interval(
10, retries, 1000, True
)
time.sleep(sleep_amount_ms / 1000)
retries += 1
else:
raise
else:
break
self.prepared = True
|
def prepare_models(self, engine):
if not self.prepared:
ResultModelBase.metadata.create_all(engine)
self.prepared = True
|
https://github.com/celery/celery/issues/6296
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/redacted.py", line 168, in _redacted
result = async_result.get()
File "/usr/local/lib/python3.7/site-packages/celery/result.py", line 226, in get
self.maybe_throw(callback=callback)
File "/usr/local/lib/python3.7/site-packages/celery/result.py", line 342, in maybe_throw
self.throw(value, self._to_remote_traceback(tb))
File "/usr/local/lib/python3.7/site-packages/celery/result.py", line 335, in throw
self.on_ready.throw(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/vine/promises.py", line 244, in throw
reraise(type(exc), exc, tb)
File "/usr/local/lib/python3.7/site-packages/vine/five.py", line 195, in reraise
raise value
Exception: <class 'sqlalchemy.exc.IntegrityError'>(('(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "pg_type_typname_nsp_index"\nDETAIL: Key (typname, typnamespace)=(taskset_id_sequence, 2200) already exists.\n',))
|
Exception
|
def _detect_handler(self, logfile=None):
"""Create handler from filename, an open stream or `None` (stderr)."""
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, "write"):
return logging.StreamHandler(logfile)
return WatchedFileHandler(logfile, encoding="utf-8")
|
def _detect_handler(self, logfile=None):
"""Create handler from filename, an open stream or `None` (stderr)."""
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, "write"):
return logging.StreamHandler(logfile)
return WatchedFileHandler(logfile)
|
https://github.com/celery/celery/issues/5144
|
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] --- Logging error ---
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] Traceback (most recent call last):
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] File "/usr/lib/python3.6/logging/__init__.py", line 994, in emit
stream.write(msg)
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] UnicodeEncodeError: 'ascii' codec can't encode characters in position 923-928: ordinal not in range(128)
[2018-10-24 15:35:00,541: WARNING/ForkPoolWorker-7] Call stack:
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/__main__.py", line 20, in <module>
main()
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/__main__.py", line 16, in main
_main()
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 322, in main
cmd.execute_from_commandline(argv)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 496, in execute_from_commandline
super(CeleryCommand, self).execute_from_commandline(argv)))
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/base.py", line 275, in execute_from_commandline
return self.handle_argv(self.prog_name, argv[1:])
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 488, in handle_argv
return self.execute(command, argv)
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/celery.py", line 420, in execute
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
[2018-10-24 15:35:00,542: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/worker.py", line 223, in run_from_argv
return self(*args, **options)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/base.py", line 238, in __call__
ret = self.run(*args, **kwargs)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bin/worker.py", line 258, in run
worker.start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/worker/worker.py", line 205, in start
self.blueprint.start(self)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bootsteps.py", line 119, in start
step.start(parent)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/bootsteps.py", line 369, in start
return self.obj.start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/base.py", line 131, in start
self.on_start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/prefork.py", line 112, in on_start
**self.options)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/asynpool.py", line 432, in __init__
super(AsynPool, self).__init__(processes, *args, **kwargs)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 1007, in __init__
self._create_worker_process(i)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/concurrency/asynpool.py", line 449, in _create_worker_process
return super(AsynPool, self)._create_worker_process(i)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 1116, in _create_worker_process
w.start()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/process.py", line 124, in start
self._popen = self._Popen(self)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/context.py", line 333, in _Popen
return Popen(process_obj)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/popen_fork.py", line 24, in __init__
self._launch(process_obj)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/popen_fork.py", line 79, in _launch
code = process_obj._bootstrap()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/process.py", line 327, in _bootstrap
self.run()
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
[2018-10-24 15:35:00,543: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 289, in __call__
sys.exit(self.workloop(pid=pid))
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/billiard/pool.py", line 358, in workloop
result = (True, prepare_result(fun(*args, **kwargs)))
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/app/trace.py", line 549, in _fast_trace_task
uuid, args, kwargs, request,
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/app/trace.py", line 458, in trace_task
'runtime': T,
[2018-10-24 15:35:00,544: WARNING/ForkPoolWorker-7] File "/usr/local/lib/python3.6/dist-packages/celery/app/trace.py", line 124, in info
logger.info(fmt, context, extra={'data': context})
|
UnicodeEncodeError
|
def __init__(
self, servers=None, keyspace=None, table=None, entry_ttl=None, port=9042, **kwargs
):
super(CassandraBackend, self).__init__(**kwargs)
if not cassandra:
raise ImproperlyConfigured(E_NO_CASSANDRA)
conf = self.app.conf
self.servers = servers or conf.get("cassandra_servers", None)
self.port = port or conf.get("cassandra_port", None)
self.keyspace = keyspace or conf.get("cassandra_keyspace", None)
self.table = table or conf.get("cassandra_table", None)
self.cassandra_options = conf.get("cassandra_options", {})
if not self.servers or not self.keyspace or not self.table:
raise ImproperlyConfigured("Cassandra backend not configured.")
expires = entry_ttl or conf.get("cassandra_entry_ttl", None)
self.cqlexpires = Q_EXPIRES.format(expires) if expires is not None else ""
read_cons = conf.get("cassandra_read_consistency") or "LOCAL_QUORUM"
write_cons = conf.get("cassandra_write_consistency") or "LOCAL_QUORUM"
self.read_consistency = getattr(
cassandra.ConsistencyLevel, read_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM
)
self.write_consistency = getattr(
cassandra.ConsistencyLevel, write_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM
)
self.auth_provider = None
auth_provider = conf.get("cassandra_auth_provider", None)
auth_kwargs = conf.get("cassandra_auth_kwargs", None)
if auth_provider and auth_kwargs:
auth_provider_class = getattr(cassandra.auth, auth_provider, None)
if not auth_provider_class:
raise ImproperlyConfigured(E_NO_SUCH_CASSANDRA_AUTH_PROVIDER)
self.auth_provider = auth_provider_class(**auth_kwargs)
self._cluster = None
self._session = None
self._write_stmt = None
self._read_stmt = None
self._lock = threading.RLock()
|
def __init__(
self, servers=None, keyspace=None, table=None, entry_ttl=None, port=9042, **kwargs
):
super(CassandraBackend, self).__init__(**kwargs)
if not cassandra:
raise ImproperlyConfigured(E_NO_CASSANDRA)
conf = self.app.conf
self.servers = servers or conf.get("cassandra_servers", None)
self.port = port or conf.get("cassandra_port", None)
self.keyspace = keyspace or conf.get("cassandra_keyspace", None)
self.table = table or conf.get("cassandra_table", None)
self.cassandra_options = conf.get("cassandra_options", {})
if not self.servers or not self.keyspace or not self.table:
raise ImproperlyConfigured("Cassandra backend not configured.")
expires = entry_ttl or conf.get("cassandra_entry_ttl", None)
self.cqlexpires = Q_EXPIRES.format(expires) if expires is not None else ""
read_cons = conf.get("cassandra_read_consistency") or "LOCAL_QUORUM"
write_cons = conf.get("cassandra_write_consistency") or "LOCAL_QUORUM"
self.read_consistency = getattr(
cassandra.ConsistencyLevel, read_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM
)
self.write_consistency = getattr(
cassandra.ConsistencyLevel, write_cons, cassandra.ConsistencyLevel.LOCAL_QUORUM
)
self.auth_provider = None
auth_provider = conf.get("cassandra_auth_provider", None)
auth_kwargs = conf.get("cassandra_auth_kwargs", None)
if auth_provider and auth_kwargs:
auth_provider_class = getattr(cassandra.auth, auth_provider, None)
if not auth_provider_class:
raise ImproperlyConfigured(E_NO_SUCH_CASSANDRA_AUTH_PROVIDER)
self.auth_provider = auth_provider_class(**auth_kwargs)
self._connection = None
self._session = None
self._write_stmt = None
self._read_stmt = None
self._make_stmt = None
|
https://github.com/celery/celery/issues/6143
|
[2020-06-02 21:52:19,592: ERROR/MainProcess] Task add[4bd528a0-c1ba-456c-b74e-7dee054616bb] raised unexpected: AttributeError("'NoneType' object has no attribute 'execute'")
Traceback (most recent call last):
File "/home/bx/project/celery/celery/app/trace.py", line 480, in trace_task
uuid, retval, task_request, publish_result,
File "/home/bx/project/celery/celery/backends/base.py", line 158, in mark_as_done
self.store_result(task_id, result, state, request=request)
File "/home/bx/project/celery/celery/backends/base.py", line 443, in store_result
request=request, **kwargs)
File "/home/bx/project/celery/celery/backends/cassandra.py", line 201, in _store_result
self._session.execute(self._write_stmt, (
AttributeError: 'NoneType' object has no attribute 'execute'
|
AttributeError
|
def _get_connection(self, write=False):
"""Prepare the connection for action.
Arguments:
write (bool): are we a writer?
"""
if self._session is not None:
return
self._lock.acquire()
try:
if self._session is not None:
return
self._cluster = cassandra.cluster.Cluster(
self.servers,
port=self.port,
auth_provider=self.auth_provider,
**self.cassandra_options,
)
self._session = self._cluster.connect(self.keyspace)
# We're forced to do concatenation below, as formatting would
# blow up on superficial %s that'll be processed by Cassandra
self._write_stmt = cassandra.query.SimpleStatement(
Q_INSERT_RESULT.format(table=self.table, expires=self.cqlexpires),
)
self._write_stmt.consistency_level = self.write_consistency
self._read_stmt = cassandra.query.SimpleStatement(
Q_SELECT_RESULT.format(table=self.table),
)
self._read_stmt.consistency_level = self.read_consistency
if write:
# Only possible writers "workers" are allowed to issue
# CREATE TABLE. This is to prevent conflicting situations
# where both task-creator and task-executor would issue it
# at the same time.
# Anyway; if you're doing anything critical, you should
# have created this table in advance, in which case
# this query will be a no-op (AlreadyExists)
make_stmt = cassandra.query.SimpleStatement(
Q_CREATE_RESULT_TABLE.format(table=self.table),
)
make_stmt.consistency_level = self.write_consistency
try:
self._session.execute(make_stmt)
except cassandra.AlreadyExists:
pass
except cassandra.OperationTimedOut:
# a heavily loaded or gone Cassandra cluster failed to respond.
# leave this class in a consistent state
if self._cluster is not None:
self._cluster.shutdown() # also shuts down _session
self._cluster = None
self._session = None
raise # we did fail after all - reraise
finally:
self._lock.release()
|
def _get_connection(self, write=False):
"""Prepare the connection for action.
Arguments:
write (bool): are we a writer?
"""
if self._connection is not None:
return
try:
self._connection = cassandra.cluster.Cluster(
self.servers,
port=self.port,
auth_provider=self.auth_provider,
**self.cassandra_options,
)
self._session = self._connection.connect(self.keyspace)
# We're forced to do concatenation below, as formatting would
# blow up on superficial %s that'll be processed by Cassandra
self._write_stmt = cassandra.query.SimpleStatement(
Q_INSERT_RESULT.format(table=self.table, expires=self.cqlexpires),
)
self._write_stmt.consistency_level = self.write_consistency
self._read_stmt = cassandra.query.SimpleStatement(
Q_SELECT_RESULT.format(table=self.table),
)
self._read_stmt.consistency_level = self.read_consistency
if write:
# Only possible writers "workers" are allowed to issue
# CREATE TABLE. This is to prevent conflicting situations
# where both task-creator and task-executor would issue it
# at the same time.
# Anyway; if you're doing anything critical, you should
# have created this table in advance, in which case
# this query will be a no-op (AlreadyExists)
self._make_stmt = cassandra.query.SimpleStatement(
Q_CREATE_RESULT_TABLE.format(table=self.table),
)
self._make_stmt.consistency_level = self.write_consistency
try:
self._session.execute(self._make_stmt)
except cassandra.AlreadyExists:
pass
except cassandra.OperationTimedOut:
# a heavily loaded or gone Cassandra cluster failed to respond.
# leave this class in a consistent state
if self._connection is not None:
self._connection.shutdown() # also shuts down _session
self._connection = None
self._session = None
raise # we did fail after all - reraise
|
https://github.com/celery/celery/issues/6143
|
[2020-06-02 21:52:19,592: ERROR/MainProcess] Task add[4bd528a0-c1ba-456c-b74e-7dee054616bb] raised unexpected: AttributeError("'NoneType' object has no attribute 'execute'")
Traceback (most recent call last):
File "/home/bx/project/celery/celery/app/trace.py", line 480, in trace_task
uuid, retval, task_request, publish_result,
File "/home/bx/project/celery/celery/backends/base.py", line 158, in mark_as_done
self.store_result(task_id, result, state, request=request)
File "/home/bx/project/celery/celery/backends/base.py", line 443, in store_result
request=request, **kwargs)
File "/home/bx/project/celery/celery/backends/cassandra.py", line 201, in _store_result
self._session.execute(self._write_stmt, (
AttributeError: 'NoneType' object has no attribute 'execute'
|
AttributeError
|
def set(self, key, value):
"""Insert a doc with value into task attribute and _key as key."""
try:
logging.debug(
'INSERT {{ task: {task}, _key: "{key}" }} INTO {collection}'.format(
collection=self.collection, key=key, task=value
)
)
self.db.AQLQuery(
'INSERT {{ task: {task}, _key: "{key}" }} INTO {collection}'.format(
collection=self.collection, key=key, task=value
)
)
except AQLQueryError as aql_err:
logging.error(aql_err)
except Exception as err:
logging.error(err)
|
def set(self, key, value, state):
"""Insert a doc with value into task attribute and _key as key."""
try:
logging.debug(
'INSERT {{ task: {task}, _key: "{key}" }} INTO {collection}'.format(
collection=self.collection, key=key, task=value
)
)
self.db.AQLQuery(
'INSERT {{ task: {task}, _key: "{key}" }} INTO {collection}'.format(
collection=self.collection, key=key, task=value
)
)
except AQLQueryError as aql_err:
logging.error(aql_err)
except Exception as err:
logging.error(err)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
"""Store a value for a given key.
Args:
key: The key at which to store the value.
value: The value to store.
"""
key = bytes_to_str(key)
LOGGER.debug("Creating Azure Block Blob at %s/%s", self._container_name, key)
return self._client.create_blob_from_text(self._container_name, key, value)
|
def set(self, key, value, state):
"""Store a value for a given key.
Args:
key: The key at which to store the value.
value: The value to store.
"""
key = bytes_to_str(key)
LOGGER.debug("Creating Azure Block Blob at %s/%s", self._container_name, key)
return self._client.create_blob_from_text(self._container_name, key, value)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
raise NotImplementedError("Must implement the set method.")
|
def set(self, key, value, state):
raise NotImplementedError("Must implement the set method.")
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs):
meta = self._get_result_meta(
result=result, state=state, traceback=traceback, request=request
)
meta["task_id"] = bytes_to_str(task_id)
# Retrieve metadata from the backend, if the status
# is a success then we ignore any following update to the state.
# This solves a task deduplication issue because of network
# partitioning or lost workers. This issue involved a race condition
# making a lost task overwrite the last successful result in the
# result backend.
current_meta = self._get_task_meta_for(task_id)
if current_meta["status"] == states.SUCCESS:
return result
self._set_with_state(self.get_key_for_task(task_id), self.encode(meta), state)
return result
|
def _store_result(self, task_id, result, state, traceback=None, request=None, **kwargs):
meta = self._get_result_meta(
result=result, state=state, traceback=traceback, request=request
)
meta["task_id"] = bytes_to_str(task_id)
# Retrieve metadata from the backend, if the status
# is a success then we ignore any following update to the state.
# This solves a task deduplication issue because of network
# partitioning or lost workers. This issue involved a race condition
# making a lost task overwrite the last successful result in the
# result backend.
current_meta = self._get_task_meta_for(task_id)
if current_meta["status"] == states.SUCCESS:
return result
self.set(self.get_key_for_task(task_id), self.encode(meta), state)
return result
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def _save_group(self, group_id, result):
self._set_with_state(
self.get_key_for_group(group_id),
self.encode({"result": result.as_tuple()}),
states.SUCCESS,
)
return result
|
def _save_group(self, group_id, result):
self.set(
self.get_key_for_group(group_id),
self.encode({"result": result.as_tuple()}),
states.SUCCESS,
)
return result
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
return self.client.set(key, value, self.expires)
|
def set(self, key, value, state):
return self.client.set(key, value, self.expires)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
"""Set a key in Consul.
Before creating the key it will create a session inside Consul
where it creates a session with a TTL
The key created afterwards will reference to the session's ID.
If the session expires it will remove the key so that results
can auto expire from the K/V store
"""
session_name = bytes_to_str(key)
key = self._key_to_consul_key(key)
logger.debug(
"Trying to create Consul session %s with TTL %d", session_name, self.expires
)
session_id = self.client.session.create(
name=session_name, behavior="delete", ttl=self.expires
)
logger.debug("Created Consul session %s", session_id)
logger.debug("Writing key %s to Consul", key)
return self.client.kv.put(key=key, value=value, acquire=session_id)
|
def set(self, key, value, state):
"""Set a key in Consul.
Before creating the key it will create a session inside Consul
where it creates a session with a TTL
The key created afterwards will reference to the session's ID.
If the session expires it will remove the key so that results
can auto expire from the K/V store
"""
session_name = bytes_to_str(key)
key = self._key_to_consul_key(key)
logger.debug(
"Trying to create Consul session %s with TTL %d", session_name, self.expires
)
session_id = self.client.session.create(
name=session_name, behavior="delete", ttl=self.expires
)
logger.debug("Created Consul session %s", session_id)
logger.debug("Writing key %s to Consul", key)
return self.client.kv.put(key=key, value=value, acquire=session_id)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
"""Store a value for a given key.
Args:
key: The key at which to store the value.
value: The value to store.
"""
key = bytes_to_str(key)
LOGGER.debug(
"Creating CosmosDB document %s/%s/%s",
self._database_name,
self._collection_name,
key,
)
self._client.CreateDocument(
self._collection_link, {"id": key, "value": value}, self._get_partition_key(key)
)
|
def set(self, key, value, state):
"""Store a value for a given key.
Args:
key: The key at which to store the value.
value: The value to store.
"""
key = bytes_to_str(key)
LOGGER.debug(
"Creating CosmosDB document %s/%s/%s",
self._database_name,
self._collection_name,
key,
)
self._client.CreateDocument(
self._collection_link, {"id": key, "value": value}, self._get_partition_key(key)
)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
self.connection.set(key, value, ttl=self.expires, format=FMT_AUTO)
|
def set(self, key, value, state):
self.connection.set(key, value, ttl=self.expires, format=FMT_AUTO)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
key = bytes_to_str(key)
data = {"_id": key, "value": value}
try:
self.connection.save(data)
except pycouchdb.exceptions.Conflict:
# document already exists, update it
data = self.connection.get(key)
data["value"] = value
self.connection.save(data)
|
def set(self, key, value, state):
key = bytes_to_str(key)
data = {"_id": key, "value": value}
try:
self.connection.save(data)
except pycouchdb.exceptions.Conflict:
# document already exists, update it
data = self.connection.get(key)
data["value"] = value
self.connection.save(data)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
key = string(key)
request_parameters = self._prepare_put_request(key, value)
self.client.put_item(**request_parameters)
|
def set(self, key, value, state):
key = string(key)
request_parameters = self._prepare_put_request(key, value)
self.client.put_item(**request_parameters)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
return self._set_with_state(key, value, None)
|
def set(self, key, value, state):
body = {
"result": value,
"@timestamp": "{0}Z".format(datetime.utcnow().isoformat()[:-3]),
}
try:
self._index(
id=key,
body=body,
)
except elasticsearch.exceptions.ConflictError:
# document already exists, update it
self._update(key, body, state)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def _update(self, id, body, state, **kwargs):
"""Update state in a conflict free manner.
If state is defined (not None), this will not update ES server if either:
* existing state is success
* existing state is a ready state and current state in not a ready state
This way, a Retry state cannot override a Success or Failure, and chord_unlock
will not retry indefinitely.
"""
body = {bytes_to_str(k): v for k, v in items(body)}
try:
res_get = self._get(key=id)
if not res_get.get("found"):
return self._index(id, body, **kwargs)
# document disappeared between index and get calls.
except elasticsearch.exceptions.NotFoundError:
return self._index(id, body, **kwargs)
try:
meta_present_on_backend = self.decode_result(res_get["_source"]["result"])
except (TypeError, KeyError):
pass
else:
if meta_present_on_backend["status"] == states.SUCCESS:
# if stored state is already in success, do nothing
return {"result": "noop"}
elif (
meta_present_on_backend["status"] in states.READY_STATES
and state in states.UNREADY_STATES
):
# if stored state is in ready state and current not, do nothing
return {"result": "noop"}
# get current sequence number and primary term
# https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html
seq_no = res_get.get("_seq_no", 1)
prim_term = res_get.get("_primary_term", 1)
# try to update document with current seq_no and primary_term
res = self.server.update(
id=bytes_to_str(id),
index=self.index,
doc_type=self.doc_type,
body={"doc": body},
params={"if_primary_term": prim_term, "if_seq_no": seq_no},
**kwargs,
)
# result is elastic search update query result
# noop = query did not update any document
# updated = at least one document got updated
if res["result"] == "noop":
raise elasticsearch.exceptions.ConflictError(
409, "conflicting update occurred concurrently", {}
)
return res
|
def _update(self, id, body, state, **kwargs):
body = {bytes_to_str(k): v for k, v in items(body)}
try:
res_get = self._get(key=id)
if not res_get.get("found"):
return self._index(id, body, **kwargs)
# document disappeared between index and get calls.
except elasticsearch.exceptions.NotFoundError:
return self._index(id, body, **kwargs)
try:
meta_present_on_backend = self.decode_result(res_get["_source"]["result"])
except (TypeError, KeyError):
pass
else:
if meta_present_on_backend["status"] == states.SUCCESS:
# if stored state is already in success, do nothing
return {"result": "noop"}
elif (
meta_present_on_backend["status"] in states.READY_STATES
and state in states.UNREADY_STATES
):
# if stored state is in ready state and current not, do nothing
return {"result": "noop"}
# get current sequence number and primary term
# https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html
seq_no = res_get.get("_seq_no", 1)
prim_term = res_get.get("_primary_term", 1)
# try to update document with current seq_no and primary_term
res = self.server.update(
id=bytes_to_str(id),
index=self.index,
doc_type=self.doc_type,
body={"doc": body},
params={"if_primary_term": prim_term, "if_seq_no": seq_no},
**kwargs,
)
# result is elastic search update query result
# noop = query did not update any document
# updated = at least one document got updated
if res["result"] == "noop":
raise elasticsearch.exceptions.ConflictError(
409, "conflicting update occurred concurrently", {}
)
return res
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
with self.open(self._filename(key), "wb") as outfile:
outfile.write(ensure_bytes(value))
|
def set(self, key, value, state):
with self.open(self._filename(key), "wb") as outfile:
outfile.write(ensure_bytes(value))
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value, **retry_policy):
return self.ensure(self._set, (key, value), **retry_policy)
|
def set(self, key, value, state, **retry_policy):
return self.ensure(self._set, (key, value), **retry_policy)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
_key = self.bucket.new(key, data=value)
_key.store()
|
def set(self, key, value, state):
_key = self.bucket.new(key, data=value)
_key.store()
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def set(self, key, value):
key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
s3_object.put(Body=value)
|
def set(self, key, value, state):
key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
s3_object.put(Body=value)
|
https://github.com/celery/celery/issues/6155
|
celery_default_1 | [2020-06-04 19:51:47,723: ERROR/MainProcess] Pool callback raised exception: TypeError('set() takes 3 positional arguments but 4 were given',)
celery_default_1 | Traceback (most recent call last):
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/billiard/pool.py", line 1796, in safe_apply_callback
celery_default_1 | fun(*args, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/worker/request.py", line 528, in on_failure
celery_default_1 | store_result=self.store_errors,
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 169, in mark_as_failure
celery_default_1 | traceback=traceback, request=request)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 443, in store_result
celery_default_1 | request=request, **kwargs)
celery_default_1 | File "/usr/share/dashboard/venv/lib/python3.6/site-packages/celery/backends/base.py", line 858, in _store_result
celery_default_1 | self.set(self.get_key_for_task(task_id), self.encode(meta), state)
celery_default_1 | TypeError: set() takes 3 positional arguments but 4 were given
|
TypeError
|
def _setdefaultopt(self, d, alt, value):
for opt in alt[1:]:
try:
return d[opt]
except KeyError:
pass
value = d.setdefault(alt[0], os.path.normpath(value))
dir_path = os.path.dirname(value)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return value
|
def _setdefaultopt(self, d, alt, value):
for opt in alt[1:]:
try:
return d[opt]
except KeyError:
pass
value = os.path.normpath(value)
dir_path = os.path.dirname(value)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return d.setdefault(alt[0], value)
|
https://github.com/celery/celery/issues/6136
|
celery multi v4.4.3 (cliffs)
_annotate_with_default_opts: print options
OrderedDict([('--app', 'service.celery:app'),
('--pidfile', '/var/run/demo/celeryd-%n.pid'),
('--logfile', '/var/log/demo/celeryd-%n%I.log'),
('--loglevel', 'INFO'),
('--workdir', '/var/lib/demo-celery'),
('--events', None),
('--heartbeat-interval', '5'),
('--without-gossip', None),
('--queues', 'high'),
('--concurrency', '1'),
('-n', 'high@celeryd.worker')])
Traceback (most recent call last):
File "/var/www/misc/ve-2006011156/bin/celery", line 8, in <module>
sys.exit(main())
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/__main__.py", line 16, in main
_main()
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/celery.py", line 322, in main
cmd.execute_from_commandline(argv)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/celery.py", line 495, in execute_from_commandline
super(CeleryCommand, self).execute_from_commandline(argv)))
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/base.py", line 305, in execute_from_commandline
return self.handle_argv(self.prog_name, argv[1:])
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/celery.py", line 487, in handle_argv
return self.execute(command, argv)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/celery.py", line 419, in execute
).run_from_argv(self.prog_name, argv[1:], command=argv[0])
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/celery.py", line 335, in run_from_argv
return cmd.execute_from_commandline([command] + argv)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/multi.py", line 266, in execute_from_commandline
return self.call_command(argv[0], argv[1:])
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/multi.py", line 273, in call_command
return self.commands[command](*argv) or EX_OK
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/multi.py", line 143, in _inner
return fun(self, *args, **kwargs)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/multi.py", line 151, in _inner
return fun(self, self.cluster_from_argv(argv), **kwargs)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/multi.py", line 361, in cluster_from_argv
_, cluster = self._cluster_from_argv(argv, cmd=cmd)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/bin/multi.py", line 366, in _cluster_from_argv
return p, self.Cluster(list(nodes), cmd=cmd)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/apps/multi.py", line 306, in <genexpr>
for name in names
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/apps/multi.py", line 314, in _node_from_options
p.optmerge(namespace, options), p.passthrough)
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/apps/multi.py", line 136, in __init__
options or OrderedDict())
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/apps/multi.py", line 145, in _annotate_with_default_opts
self._setdefaultopt(options, ['--pidfile', '-p'], '/var/run/celery/%n.pid')
File "/var/www/misc/ve-2006011156/lib/python3.7/site-packages/celery/apps/multi.py", line 159, in _setdefaultopt
os.makedirs(dir_path)
File "/var/www/misc/ve-2006011156/lib/python3.7/os.py", line 223, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/var/run/celery'
systemd[1]: demo@celeryd.service: Control process exited, code=exited, status=1/FAILURE
|
PermissionError
|
def _task_from_fun(self, fun, name=None, base=None, bind=False, **options):
if not self.finalized and not self.autofinalize:
raise RuntimeError("Contract breach: app not finalized")
name = name or self.gen_task_name(fun.__name__, fun.__module__)
base = base or self.Task
if name not in self._tasks:
run = fun if bind else staticmethod(fun)
task = type(
fun.__name__,
(base,),
dict(
{
"app": self,
"name": name,
"run": run,
"_decorated": True,
"__doc__": fun.__doc__,
"__module__": fun.__module__,
"__header__": staticmethod(head_from_fun(fun, bound=bind)),
"__wrapped__": run,
},
**options,
),
)()
# for some reason __qualname__ cannot be set in type()
# so we have to set it here.
try:
task.__qualname__ = fun.__qualname__
except AttributeError:
pass
self._tasks[task.name] = task
task.bind(self) # connects task to this app
autoretry_for = tuple(
options.get("autoretry_for", getattr(task, "autoretry_for", ()))
)
retry_kwargs = options.get("retry_kwargs", getattr(task, "retry_kwargs", {}))
retry_backoff = int(
options.get("retry_backoff", getattr(task, "retry_backoff", False))
)
retry_backoff_max = int(
options.get("retry_backoff_max", getattr(task, "retry_backoff_max", 600))
)
retry_jitter = options.get("retry_jitter", getattr(task, "retry_jitter", True))
if autoretry_for and not hasattr(task, "_orig_run"):
@wraps(task.run)
def run(*args, **kwargs):
try:
return task._orig_run(*args, **kwargs)
except Ignore:
# If Ignore signal occures task shouldn't be retried,
# even if it suits autoretry_for list
raise
except Retry:
raise
except autoretry_for as exc:
if retry_backoff:
retry_kwargs["countdown"] = get_exponential_backoff_interval(
factor=retry_backoff,
retries=task.request.retries,
maximum=retry_backoff_max,
full_jitter=retry_jitter,
)
raise task.retry(exc=exc, **retry_kwargs)
task._orig_run, task.run = task.run, run
else:
task = self._tasks[name]
return task
|
def _task_from_fun(self, fun, name=None, base=None, bind=False, **options):
if not self.finalized and not self.autofinalize:
raise RuntimeError("Contract breach: app not finalized")
name = name or self.gen_task_name(fun.__name__, fun.__module__)
base = base or self.Task
if name not in self._tasks:
run = fun if bind else staticmethod(fun)
task = type(
fun.__name__,
(base,),
dict(
{
"app": self,
"name": name,
"run": run,
"_decorated": True,
"__doc__": fun.__doc__,
"__module__": fun.__module__,
"__header__": staticmethod(head_from_fun(fun, bound=bind)),
"__wrapped__": run,
},
**options,
),
)()
# for some reason __qualname__ cannot be set in type()
# so we have to set it here.
try:
task.__qualname__ = fun.__qualname__
except AttributeError:
pass
self._tasks[task.name] = task
task.bind(self) # connects task to this app
autoretry_for = tuple(
options.get("autoretry_for", getattr(task, "autoretry_for", ()))
)
retry_kwargs = options.get("retry_kwargs", getattr(task, "retry_kwargs", {}))
retry_backoff = int(
options.get("retry_backoff", getattr(task, "retry_backoff", False))
)
retry_backoff_max = int(
options.get("retry_backoff_max", getattr(task, "retry_backoff_max", 600))
)
retry_jitter = options.get("retry_jitter", getattr(task, "retry_jitter", True))
if autoretry_for and not hasattr(task, "_orig_run"):
@wraps(task.run)
def run(*args, **kwargs):
try:
return task._orig_run(*args, **kwargs)
except Ignore:
# If Ignore signal occures task shouldn't be retried,
# even if it suits autoretry_for list
raise
except autoretry_for as exc:
if retry_backoff:
retry_kwargs["countdown"] = get_exponential_backoff_interval(
factor=retry_backoff,
retries=task.request.retries,
maximum=retry_backoff_max,
full_jitter=retry_jitter,
)
raise task.retry(exc=exc, **retry_kwargs)
task._orig_run, task.run = task.run, run
else:
task = self._tasks[name]
return task
|
https://github.com/celery/celery/issues/6135
|
[2020-05-31 23:28:34,434: INFO/MainProcess] Connected to amqp://remote_worker:**@127.0.0.1:5672//
[2020-05-31 23:28:34,453: INFO/MainProcess] mingle: searching for neighbors
[2020-05-31 23:28:35,487: INFO/MainProcess] mingle: all alone
[2020-05-31 23:28:35,528: WARNING/MainProcess] /home/ubuntu/.local/lib/python3.7/site-packages/celery/fixups/django.py:203: UserWarning: Using settings.DEBUG leads to a memory
leak, never use this setting in production environments!
leak, never use this setting in production environments!''')
[2020-05-31 23:28:35,529: INFO/MainProcess] celery@testroom ready.
[2020-05-31 23:28:47,351: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906]
[2020-05-31 23:28:47,689: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:28:47,690: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:28:47,721: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:28:57.692348+00:00]
[2020-05-31 23:28:47,722: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:28:57.716321+00:00]
[2020-05-31 23:28:47,777: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] retry: Retry in 10s: Retry(Retry(...), Exception('i have filled now'), 10)
[2020-05-31 23:28:57,999: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:28:58,000: WARNING/ForkPoolWorker-1] ended
[2020-05-31 23:28:58,062: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] succeeded in 0.34440315900428686s: None
[2020-05-31 23:28:58,301: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:28:58,302: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:28:58,304: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:08.303091+00:00]
[2020-05-31 23:28:58,307: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:08.306141+00:00]
[2020-05-31 23:28:58,368: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] retry: Retry in 10s: Retry(Retry(...), Exception('i have filled now'), 10)
[2020-05-31 23:29:08,572: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:08,573: WARNING/ForkPoolWorker-1] ended
[2020-05-31 23:29:08,633: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] succeeded in 0.3256059319974156s: None
[2020-05-31 23:29:08,872: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:08,873: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:29:08,875: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:18.873799+00:00]
[2020-05-31 23:29:08,880: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:18.877550+00:00]
[2020-05-31 23:29:08,940: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] retry: Retry in 10s: Retry(Retry(...), Exception('i have filled now'), 10)
[2020-05-31 23:29:19,144: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:19,145: WARNING/ForkPoolWorker-1] ended
[2020-05-31 23:29:19,205: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] succeeded in 0.326258520995907s: None
[2020-05-31 23:29:19,444: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:19,445: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:29:19,505: ERROR/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] raised unexpected: Exception('i have filled now')
Traceback (most recent call last):
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/trace.py", line 650, in __protected_call__
return self.run(*args, **kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/base.py", line 500, in run
raise task.retry(exc=exc, **retry_kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/task.py", line 704, in retry
raise_with_context(exc)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/base.py", line 487, in run
return task._orig_run(*args, **kwargs)
File "/var/www/django_projects/earthalytics-api/api_v3/tests.py", line 26, in execute
self.retry(exc=Exception("i have filled now"), args=[param_a, param_b], kwargs=kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/task.py", line 704, in retry
raise_with_context(exc)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/utils/serialization.py", line 288, in raise_with_context
_raise_with_context(exc, exc_info[1])
File "<string>", line 1, in _raise_with_context
Exception: i have filled now
|
Exception
|
def run(*args, **kwargs):
try:
return task._orig_run(*args, **kwargs)
except Ignore:
# If Ignore signal occures task shouldn't be retried,
# even if it suits autoretry_for list
raise
except Retry:
raise
except autoretry_for as exc:
if retry_backoff:
retry_kwargs["countdown"] = get_exponential_backoff_interval(
factor=retry_backoff,
retries=task.request.retries,
maximum=retry_backoff_max,
full_jitter=retry_jitter,
)
raise task.retry(exc=exc, **retry_kwargs)
|
def run(*args, **kwargs):
try:
return task._orig_run(*args, **kwargs)
except Ignore:
# If Ignore signal occures task shouldn't be retried,
# even if it suits autoretry_for list
raise
except autoretry_for as exc:
if retry_backoff:
retry_kwargs["countdown"] = get_exponential_backoff_interval(
factor=retry_backoff,
retries=task.request.retries,
maximum=retry_backoff_max,
full_jitter=retry_jitter,
)
raise task.retry(exc=exc, **retry_kwargs)
|
https://github.com/celery/celery/issues/6135
|
[2020-05-31 23:28:34,434: INFO/MainProcess] Connected to amqp://remote_worker:**@127.0.0.1:5672//
[2020-05-31 23:28:34,453: INFO/MainProcess] mingle: searching for neighbors
[2020-05-31 23:28:35,487: INFO/MainProcess] mingle: all alone
[2020-05-31 23:28:35,528: WARNING/MainProcess] /home/ubuntu/.local/lib/python3.7/site-packages/celery/fixups/django.py:203: UserWarning: Using settings.DEBUG leads to a memory
leak, never use this setting in production environments!
leak, never use this setting in production environments!''')
[2020-05-31 23:28:35,529: INFO/MainProcess] celery@testroom ready.
[2020-05-31 23:28:47,351: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906]
[2020-05-31 23:28:47,689: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:28:47,690: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:28:47,721: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:28:57.692348+00:00]
[2020-05-31 23:28:47,722: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:28:57.716321+00:00]
[2020-05-31 23:28:47,777: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] retry: Retry in 10s: Retry(Retry(...), Exception('i have filled now'), 10)
[2020-05-31 23:28:57,999: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:28:58,000: WARNING/ForkPoolWorker-1] ended
[2020-05-31 23:28:58,062: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] succeeded in 0.34440315900428686s: None
[2020-05-31 23:28:58,301: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:28:58,302: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:28:58,304: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:08.303091+00:00]
[2020-05-31 23:28:58,307: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:08.306141+00:00]
[2020-05-31 23:28:58,368: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] retry: Retry in 10s: Retry(Retry(...), Exception('i have filled now'), 10)
[2020-05-31 23:29:08,572: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:08,573: WARNING/ForkPoolWorker-1] ended
[2020-05-31 23:29:08,633: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] succeeded in 0.3256059319974156s: None
[2020-05-31 23:29:08,872: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:08,873: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:29:08,875: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:18.873799+00:00]
[2020-05-31 23:29:08,880: INFO/MainProcess] Received task: api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] ETA:[2020-05-31 23:29:18.877550+00:00]
[2020-05-31 23:29:08,940: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] retry: Retry in 10s: Retry(Retry(...), Exception('i have filled now'), 10)
[2020-05-31 23:29:19,144: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:19,145: WARNING/ForkPoolWorker-1] ended
[2020-05-31 23:29:19,205: INFO/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] succeeded in 0.326258520995907s: None
[2020-05-31 23:29:19,444: WARNING/ForkPoolWorker-1] started
[2020-05-31 23:29:19,445: WARNING/ForkPoolWorker-1] retry
[2020-05-31 23:29:19,505: ERROR/ForkPoolWorker-1] Task api_v3.tests.execute[e97d93b5-b0e5-4b87-96ab-1aab66119906] raised unexpected: Exception('i have filled now')
Traceback (most recent call last):
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/trace.py", line 650, in __protected_call__
return self.run(*args, **kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/base.py", line 500, in run
raise task.retry(exc=exc, **retry_kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/task.py", line 704, in retry
raise_with_context(exc)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/base.py", line 487, in run
return task._orig_run(*args, **kwargs)
File "/var/www/django_projects/earthalytics-api/api_v3/tests.py", line 26, in execute
self.retry(exc=Exception("i have filled now"), args=[param_a, param_b], kwargs=kwargs)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/app/task.py", line 704, in retry
raise_with_context(exc)
File "/home/ubuntu/.local/lib/python3.7/site-packages/celery/utils/serialization.py", line 288, in raise_with_context
_raise_with_context(exc, exc_info[1])
File "<string>", line 1, in _raise_with_context
Exception: i have filled now
|
Exception
|
def __or__(self, other):
# These could be implemented in each individual class,
# I'm sure, but for now we have this.
if isinstance(self, group):
# group() | task -> chord
return chord(self, body=other, app=self._app)
elif isinstance(other, group):
# unroll group with one member
other = maybe_unroll_group(other)
if isinstance(self, _chain):
# chain | group() -> chain
tasks = self.unchain_tasks()
if not tasks:
# If the chain is empty, return the group
return other
return _chain(seq_concat_item(tasks, other), app=self._app)
# task | group() -> chain
return _chain(self, other, app=self.app)
if not isinstance(self, _chain) and isinstance(other, _chain):
# task | chain -> chain
return _chain(seq_concat_seq((self,), other.unchain_tasks()), app=self._app)
elif isinstance(other, _chain):
# chain | chain -> chain
return _chain(
seq_concat_seq(self.unchain_tasks(), other.unchain_tasks()), app=self._app
)
elif isinstance(self, chord):
# chord | task -> attach to body
sig = self.clone()
sig.body = sig.body | other
return sig
elif isinstance(other, Signature):
if isinstance(self, _chain):
if self.tasks and isinstance(self.tasks[-1], group):
# CHAIN [last item is group] | TASK -> chord
sig = self.clone()
sig.tasks[-1] = chord(sig.tasks[-1], other, app=self._app)
return sig
elif self.tasks and isinstance(self.tasks[-1], chord):
# CHAIN [last item is chord] -> chain with chord body.
sig = self.clone()
sig.tasks[-1].body = sig.tasks[-1].body | other
return sig
else:
# chain | task -> chain
return _chain(
seq_concat_item(self.unchain_tasks(), other), app=self._app
)
# task | task -> chain
return _chain(self, other, app=self._app)
return NotImplemented
|
def __or__(self, other):
# These could be implemented in each individual class,
# I'm sure, but for now we have this.
if isinstance(self, group):
# group() | task -> chord
return chord(self, body=other, app=self._app)
elif isinstance(other, group):
# unroll group with one member
other = maybe_unroll_group(other)
if isinstance(self, _chain):
# chain | group() -> chain
return _chain(seq_concat_item(self.unchain_tasks(), other), app=self._app)
# task | group() -> chain
return _chain(self, other, app=self.app)
if not isinstance(self, _chain) and isinstance(other, _chain):
# task | chain -> chain
return _chain(seq_concat_seq((self,), other.unchain_tasks()), app=self._app)
elif isinstance(other, _chain):
# chain | chain -> chain
return _chain(
seq_concat_seq(self.unchain_tasks(), other.unchain_tasks()), app=self._app
)
elif isinstance(self, chord):
# chord | task -> attach to body
sig = self.clone()
sig.body = sig.body | other
return sig
elif isinstance(other, Signature):
if isinstance(self, _chain):
if self.tasks and isinstance(self.tasks[-1], group):
# CHAIN [last item is group] | TASK -> chord
sig = self.clone()
sig.tasks[-1] = chord(sig.tasks[-1], other, app=self._app)
return sig
elif self.tasks and isinstance(self.tasks[-1], chord):
# CHAIN [last item is chord] -> chain with chord body.
sig = self.clone()
sig.tasks[-1].body = sig.tasks[-1].body | other
return sig
else:
# chain | task -> chain
return _chain(
seq_concat_item(self.unchain_tasks(), other), app=self._app
)
# task | task -> chain
return _chain(self, other, app=self._app)
return NotImplemented
|
https://github.com/celery/celery/issues/5973
|
Traceback (most recent call last):
File "test.py", line 30, in <module>
chain([chain(s2)]).apply_async() # issue
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 642, in apply_async
dict(self.options, **options) if options else self.options))
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 660, in run
task_id, group_id, chord,
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 721, in prepare_steps
task = task.clone(args, kwargs)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
...
..
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
keeps repeating
..
..
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 617, in clone
signature = Signature.clone(self, *args, **kwargs)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 272, in clone
app=self._app)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 153, in from_dict
return target_cls.from_dict(d, app=app)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 599, in from_dict
return _upgrade(d, _chain(tasks, app=app, **d['options']))
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 602, in __init__
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
File "/bb/bin/dl/celery/4.4/kombu/utils/functional.py", line 256, in is_list
return isinstance(l, iters) and not isinstance(l, scalars or ())
File "/opt/bb/lib/python2.7/abc.py", line 132, in __instancecheck__
if subclass is not None and subclass in cls._abc_cache:
File "/opt/bb/lib/python2.7/_weakrefset.py", line 72, in __contains__
wr = ref(item)
RuntimeError: maximum recursion depth exceeded
|
RuntimeError
|
def unchain_tasks(self):
# Clone chain's tasks assigning signatures from link_error
# to each task
tasks = [t.clone() for t in self.tasks]
for sig in self.options.get("link_error", []):
for task in tasks:
task.link_error(sig)
return tasks
|
def unchain_tasks(self):
# Clone chain's tasks assigning sugnatures from link_error
# to each task
tasks = [t.clone() for t in self.tasks]
for sig in self.options.get("link_error", []):
for task in tasks:
task.link_error(sig)
return tasks
|
https://github.com/celery/celery/issues/5973
|
Traceback (most recent call last):
File "test.py", line 30, in <module>
chain([chain(s2)]).apply_async() # issue
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 642, in apply_async
dict(self.options, **options) if options else self.options))
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 660, in run
task_id, group_id, chord,
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 721, in prepare_steps
task = task.clone(args, kwargs)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
...
..
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
keeps repeating
..
..
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 617, in clone
signature = Signature.clone(self, *args, **kwargs)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 272, in clone
app=self._app)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 153, in from_dict
return target_cls.from_dict(d, app=app)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 599, in from_dict
return _upgrade(d, _chain(tasks, app=app, **d['options']))
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 602, in __init__
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
File "/bb/bin/dl/celery/4.4/kombu/utils/functional.py", line 256, in is_list
return isinstance(l, iters) and not isinstance(l, scalars or ())
File "/opt/bb/lib/python2.7/abc.py", line 132, in __instancecheck__
if subclass is not None and subclass in cls._abc_cache:
File "/opt/bb/lib/python2.7/_weakrefset.py", line 72, in __contains__
wr = ref(item)
RuntimeError: maximum recursion depth exceeded
|
RuntimeError
|
def __new__(cls, *tasks, **kwargs):
# This forces `chain(X, Y, Z)` to work the same way as `X | Y | Z`
if not kwargs and tasks:
if len(tasks) != 1 or is_list(tasks[0]):
tasks = tasks[0] if len(tasks) == 1 else tasks
# if is_list(tasks) and len(tasks) == 1:
# return super(chain, cls).__new__(cls, tasks, **kwargs)
return reduce(operator.or_, tasks, chain())
return super(chain, cls).__new__(cls, *tasks, **kwargs)
|
def __new__(cls, *tasks, **kwargs):
# This forces `chain(X, Y, Z)` to work the same way as `X | Y | Z`
if not kwargs and tasks:
if len(tasks) != 1 or is_list(tasks[0]):
tasks = tasks[0] if len(tasks) == 1 else tasks
return reduce(operator.or_, tasks)
return super(chain, cls).__new__(cls, *tasks, **kwargs)
|
https://github.com/celery/celery/issues/5973
|
Traceback (most recent call last):
File "test.py", line 30, in <module>
chain([chain(s2)]).apply_async() # issue
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 642, in apply_async
dict(self.options, **options) if options else self.options))
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 660, in run
task_id, group_id, chord,
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 721, in prepare_steps
task = task.clone(args, kwargs)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
...
..
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 620, in clone
for sig in signature.kwargs['tasks']
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 1513, in maybe_signature
d = d.clone()
keeps repeating
..
..
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 617, in clone
signature = Signature.clone(self, *args, **kwargs)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 272, in clone
app=self._app)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 153, in from_dict
return target_cls.from_dict(d, app=app)
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 599, in from_dict
return _upgrade(d, _chain(tasks, app=app, **d['options']))
File "/bb/bin/dl/celery/4.4/celery/canvas.py", line 602, in __init__
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
File "/bb/bin/dl/celery/4.4/kombu/utils/functional.py", line 256, in is_list
return isinstance(l, iters) and not isinstance(l, scalars or ())
File "/opt/bb/lib/python2.7/abc.py", line 132, in __instancecheck__
if subclass is not None and subclass in cls._abc_cache:
File "/opt/bb/lib/python2.7/_weakrefset.py", line 72, in __contains__
wr = ref(item)
RuntimeError: maximum recursion depth exceeded
|
RuntimeError
|
def __call__(self, *args, **kwargs):
_task_stack.push(self)
self.push_request(args=args, kwargs=kwargs)
try:
return self.run(*args, **kwargs)
finally:
self.pop_request()
_task_stack.pop()
|
def __call__(self, *args, **kwargs):
logger = get_logger(__name__)
def handle_sigterm(signum, frame):
logger.info("SIGTERM received, waiting till the task finished")
signal.signal(signal.SIGTERM, handle_sigterm)
_task_stack.push(self)
self.push_request(args=args, kwargs=kwargs)
try:
return self.run(*args, **kwargs)
finally:
self.pop_request()
_task_stack.pop()
|
https://github.com/celery/celery/issues/5775
|
Traceback (most recent call last):
File "/app/pipelines/serializers.py", line 184, in create
remote_worker=validated_data['with_remote'],
File "/usr/local/lib/python3.7/site-packages/celery/local.py", line 191, in __call__
return self._get_current_object()(*a, **kw)
File "/usr/local/lib/python3.7/site-packages/celery/app/task.py", line 396, in __call__
signal.signal(signal.SIGTERM, handle_sigterm)
File "/usr/local/lib/python3.7/signal.py", line 47, in signal
handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
ValueError: signal only works in main thread
|
ValueError
|
def remove_if_stale(self):
"""Remove the lock if the process isn't running.
I.e. process does not respons to signal.
"""
try:
pid = self.read_pid()
except ValueError:
print("Broken pidfile found - Removing it.", file=sys.stderr)
self.remove()
return True
if not pid:
self.remove()
return True
try:
os.kill(pid, 0)
except os.error as exc:
if exc.errno == errno.ESRCH or exc.errno == errno.EPERM:
print("Stale pidfile exists - Removing it.", file=sys.stderr)
self.remove()
return True
except SystemError:
print("Stale pidfile exists - Removing it.", file=sys.stderr)
self.remove()
return True
return False
|
def remove_if_stale(self):
"""Remove the lock if the process isn't running.
I.e. process does not respons to signal.
"""
try:
pid = self.read_pid()
except ValueError:
print("Broken pidfile found - Removing it.", file=sys.stderr)
self.remove()
return True
if not pid:
self.remove()
return True
try:
os.kill(pid, 0)
except os.error as exc:
if exc.errno == errno.ESRCH:
print("Stale pidfile exists - Removing it.", file=sys.stderr)
self.remove()
return True
except SystemError:
print("Stale pidfile exists - Removing it.", file=sys.stderr)
self.remove()
return True
return False
|
https://github.com/celery/celery/issues/5409
|
os.kill(2646, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
OSError: [Errno 1] Operation not permitted
|
OSError
|
def join(
self,
timeout=None,
propagate=True,
interval=0.5,
callback=None,
no_ack=True,
on_message=None,
disable_sync_subtasks=True,
on_interval=None,
):
"""Gather the results of all tasks as a list in order.
Note:
This can be an expensive operation for result store
backends that must resort to polling (e.g., database).
You should consider using :meth:`join_native` if your backend
supports it.
Warning:
Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.
Arguments:
timeout (float): The number of seconds to wait for results
before the operation times out.
propagate (bool): If any of the tasks raises an exception,
the exception will be re-raised when this flag is set.
interval (float): Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this does not have
any effect when using the amqp result store backend,
as it does not use polling.
callback (Callable): Optional callback to be called for every
result received. Must have signature ``(task_id, value)``
No results will be returned by this function if a callback
is specified. The order of results is also arbitrary when a
callback is used. To get access to the result object for
a particular id you'll have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).
no_ack (bool): Automatic message acknowledgment (Note that if this
is set to :const:`False` then the messages
*will not be acknowledged*).
disable_sync_subtasks (bool): Disable tasks to wait for sub tasks
this is the default configuration. CAUTION do not enable this
unless you must.
Raises:
celery.exceptions.TimeoutError: if ``timeout`` isn't
:const:`None` and the operation takes longer than ``timeout``
seconds.
"""
if disable_sync_subtasks:
assert_will_not_block()
time_start = monotonic()
remaining = None
if on_message is not None:
raise ImproperlyConfigured("Backend does not support on_message callback")
results = []
for result in self.results:
remaining = None
if timeout:
remaining = timeout - (monotonic() - time_start)
if remaining <= 0.0:
raise TimeoutError("join operation timed out")
value = result.get(
timeout=remaining,
propagate=propagate,
interval=interval,
no_ack=no_ack,
on_interval=on_interval,
disable_sync_subtasks=disable_sync_subtasks,
)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
|
def join(
self,
timeout=None,
propagate=True,
interval=0.5,
callback=None,
no_ack=True,
on_message=None,
disable_sync_subtasks=True,
on_interval=None,
):
"""Gather the results of all tasks as a list in order.
Note:
This can be an expensive operation for result store
backends that must resort to polling (e.g., database).
You should consider using :meth:`join_native` if your backend
supports it.
Warning:
Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.
Arguments:
timeout (float): The number of seconds to wait for results
before the operation times out.
propagate (bool): If any of the tasks raises an exception,
the exception will be re-raised when this flag is set.
interval (float): Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this does not have
any effect when using the amqp result store backend,
as it does not use polling.
callback (Callable): Optional callback to be called for every
result received. Must have signature ``(task_id, value)``
No results will be returned by this function if a callback
is specified. The order of results is also arbitrary when a
callback is used. To get access to the result object for
a particular id you'll have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).
no_ack (bool): Automatic message acknowledgment (Note that if this
is set to :const:`False` then the messages
*will not be acknowledged*).
disable_sync_subtasks (bool): Disable tasks to wait for sub tasks
this is the default configuration. CAUTION do not enable this
unless you must.
Raises:
celery.exceptions.TimeoutError: if ``timeout`` isn't
:const:`None` and the operation takes longer than ``timeout``
seconds.
"""
if disable_sync_subtasks:
assert_will_not_block()
time_start = monotonic()
remaining = None
if on_message is not None:
raise ImproperlyConfigured("Backend does not support on_message callback")
results = []
for result in self.results:
remaining = None
if timeout:
remaining = timeout - (monotonic() - time_start)
if remaining <= 0.0:
raise TimeoutError("join operation timed out")
value = result.get(
timeout=remaining,
propagate=propagate,
interval=interval,
no_ack=no_ack,
on_interval=on_interval,
)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
|
https://github.com/celery/celery/issues/5736
|
Traceback (most recent call last):
File "/home/gsfish/.pyenv/versions/scan_detect/lib/python3.5/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/gsfish/.pyenv/versions/scan_detect/lib/python3.5/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/home/gsfish/work/netease/project/scan_detect/tasks.py", line 106, in test
return result_set.get(disable_sync_subtasks=False)
File "/home/gsfish/.pyenv/versions/scan_detect/lib/python3.5/site-packages/celery/result.py", line 697, in get
on_interval=on_interval,
File "/home/gsfish/.pyenv/versions/scan_detect/lib/python3.5/site-packages/celery/result.py", line 765, in join
interval=interval, no_ack=no_ack, on_interval=on_interval,
File "/home/gsfish/.pyenv/versions/scan_detect/lib/python3.5/site-packages/celery/result.py", line 205, in get
assert_will_not_block()
File "/home/gsfish/.pyenv/versions/scan_detect/lib/python3.5/site-packages/celery/result.py", line 41, in assert_will_not_block
raise RuntimeError(E_WOULDBLOCK)
RuntimeError: Never call result.get() within a task!
See http://docs.celeryq.org/en/latest/userguide/tasks.html#task-synchronous-subtasks
|
RuntimeError
|
def prepare_steps(
self,
args,
kwargs,
tasks,
root_id=None,
parent_id=None,
link_error=None,
app=None,
last_task_id=None,
group_id=None,
chord_body=None,
clone=True,
from_dict=Signature.from_dict,
):
app = app or self.app
# use chain message field for protocol 2 and later.
# this avoids pickle blowing the stack on the recursion
# required by linking task together in a tree structure.
# (why is pickle using recursion? or better yet why cannot python
# do tail call optimization making recursion actually useful?)
use_link = self._use_link
if use_link is None and app.conf.task_protocol == 1:
use_link = True
steps = deque(tasks)
steps_pop = steps.pop
steps_extend = steps.extend
prev_task = None
prev_res = None
tasks, results = [], []
i = 0
# NOTE: We are doing this in reverse order.
# The result is a list of tasks in reverse order, that is
# passed as the ``chain`` message field.
# As it's reversed the worker can just do ``chain.pop()`` to
# get the next task in the chain.
while steps:
task = steps_pop()
is_first_task, is_last_task = not steps, not i
if not isinstance(task, abstract.CallableSignature):
task = from_dict(task, app=app)
if isinstance(task, group):
task = maybe_unroll_group(task)
# first task gets partial args from chain
if clone:
if is_first_task:
task = task.clone(args, kwargs)
else:
task = task.clone()
elif is_first_task:
task.args = tuple(args) + tuple(task.args)
if isinstance(task, _chain):
# splice the chain
steps_extend(task.tasks)
continue
if isinstance(task, group) and prev_task:
# automatically upgrade group(...) | s to chord(group, s)
# for chords we freeze by pretending it's a normal
# signature instead of a group.
tasks.pop()
results.pop()
try:
task = chord(
task,
body=prev_task,
task_id=prev_res.task_id,
root_id=root_id,
app=app,
)
except AttributeError:
# A GroupResult does not have a task_id since it consists
# of multiple tasks.
# We therefore, have to construct the chord without it.
# Issues #5467, #3585.
task = chord(
task,
body=prev_task,
root_id=root_id,
app=app,
)
if is_last_task:
# chain(task_id=id) means task id is set for the last task
# in the chain. If the chord is part of a chord/group
# then that chord/group must synchronize based on the
# last task in the chain, so we only set the group_id and
# chord callback for the last task.
res = task.freeze(
last_task_id,
root_id=root_id,
group_id=group_id,
chord=chord_body,
)
else:
res = task.freeze(root_id=root_id)
i += 1
if prev_task:
if use_link:
# link previous task to this task.
task.link(prev_task)
if prev_res and not prev_res.parent:
prev_res.parent = res
if link_error:
for errback in maybe_list(link_error):
task.link_error(errback)
tasks.append(task)
results.append(res)
prev_task, prev_res = task, res
if isinstance(task, chord):
app.backend.ensure_chords_allowed()
# If the task is a chord, and the body is a chain
# the chain has already been prepared, and res is
# set to the last task in the callback chain.
# We need to change that so that it points to the
# group result object.
node = res
while node.parent:
node = node.parent
prev_res = node
return tasks, results
|
def prepare_steps(
self,
args,
kwargs,
tasks,
root_id=None,
parent_id=None,
link_error=None,
app=None,
last_task_id=None,
group_id=None,
chord_body=None,
clone=True,
from_dict=Signature.from_dict,
):
app = app or self.app
# use chain message field for protocol 2 and later.
# this avoids pickle blowing the stack on the recursion
# required by linking task together in a tree structure.
# (why is pickle using recursion? or better yet why cannot python
# do tail call optimization making recursion actually useful?)
use_link = self._use_link
if use_link is None and app.conf.task_protocol == 1:
use_link = True
steps = deque(tasks)
steps_pop = steps.pop
steps_extend = steps.extend
prev_task = None
prev_res = None
tasks, results = [], []
i = 0
# NOTE: We are doing this in reverse order.
# The result is a list of tasks in reverse order, that is
# passed as the ``chain`` message field.
# As it's reversed the worker can just do ``chain.pop()`` to
# get the next task in the chain.
while steps:
task = steps_pop()
is_first_task, is_last_task = not steps, not i
if not isinstance(task, abstract.CallableSignature):
task = from_dict(task, app=app)
if isinstance(task, group):
task = maybe_unroll_group(task)
# first task gets partial args from chain
if clone:
if is_first_task:
task = task.clone(args, kwargs)
else:
task = task.clone()
elif is_first_task:
task.args = tuple(args) + tuple(task.args)
if isinstance(task, _chain):
# splice the chain
steps_extend(task.tasks)
continue
if isinstance(task, group) and prev_task:
# automatically upgrade group(...) | s to chord(group, s)
# for chords we freeze by pretending it's a normal
# signature instead of a group.
tasks.pop()
results.pop()
task = chord(
task,
body=prev_task,
task_id=prev_res.task_id,
root_id=root_id,
app=app,
)
if is_last_task:
# chain(task_id=id) means task id is set for the last task
# in the chain. If the chord is part of a chord/group
# then that chord/group must synchronize based on the
# last task in the chain, so we only set the group_id and
# chord callback for the last task.
res = task.freeze(
last_task_id,
root_id=root_id,
group_id=group_id,
chord=chord_body,
)
else:
res = task.freeze(root_id=root_id)
i += 1
if prev_task:
if use_link:
# link previous task to this task.
task.link(prev_task)
if prev_res and not prev_res.parent:
prev_res.parent = res
if link_error:
for errback in maybe_list(link_error):
task.link_error(errback)
tasks.append(task)
results.append(res)
prev_task, prev_res = task, res
if isinstance(task, chord):
app.backend.ensure_chords_allowed()
# If the task is a chord, and the body is a chain
# the chain has already been prepared, and res is
# set to the last task in the callback chain.
# We need to change that so that it points to the
# group result object.
node = res
while node.parent:
node = node.parent
prev_res = node
return tasks, results
|
https://github.com/celery/celery/issues/5467
|
Traceback (most recent call last):
File "debug_run.py", line 19, in <module>
res.delay()
File "/home/ja04913/ocenter/ocenter-venv/lib/python2.7/site-packages/celery/canvas.py", line 179, in delay
return self.apply_async(partial_args, partial_kwargs)
File "/home/ja04913/ocenter/ocenter-venv/lib/python2.7/site-packages/celery/canvas.py", line 557, in apply_async
dict(self.options, **options) if options else self.options))
File "/home/ja04913/ocenter/ocenter-venv/lib/python2.7/site-packages/celery/canvas.py", line 573, in run
task_id, group_id, chord,
File "/home/ja04913/ocenter/ocenter-venv/lib/python2.7/site-packages/celery/canvas.py", line 655, in prepare_steps
task_id=prev_res.task_id, root_id=root_id, app=app,
AttributeError: 'GroupResult' object has no attribute 'task_id'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.