after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def safe_update_index(path):
chunk_id = path.rsplit("/", 1)[-1]
if len(chunk_id) != STRLEN_CHUNKID:
return
for c in chunk_id:
if c not in hexdigits:
return
try:
self.update_index(path)
self.successes += 1
self.logger.debug("Updated %s", path)
except OioNetworkException as exc:
self.errors += 1
self.logger.warn("ERROR while updating %s: %s", path, exc)
except VolumeException as exc:
self.errors += 1
self.logger.error("Cannot index %s: %s", path, exc)
# All chunks of this volume are indexed in the same service,
# no need to try another chunk, it will generate the same
# error. Let the upper level retry later.
raise
except Exception:
self.errors += 1
self.logger.exception("ERROR while updating %s", path)
self.total_since_last_reported += 1
|
def safe_update_index(path):
chunk_id = path.rsplit("/", 1)[-1]
if len(chunk_id) != STRLEN_CHUNKID:
return
for c in chunk_id:
if c not in hexdigits:
return
try:
self.update_index(path)
self.successes += 1
self.logger.debug("Updated %s", path)
except OioNetworkException as exc:
self.errors += 1
self.logger.warn("ERROR while updating %s: %s", path, exc)
except Exception:
self.errors += 1
self.logger.exception("ERROR while updating %s", path)
self.total_since_last_reported += 1
|
https://github.com/open-io/oio-sds/issues/1512
|
11573 7F920C7A0730 log ERROR ERROR while updating /var/lib/oio/sds/OPENIO/rawx-0/7BD/7BDE49669C6616A0656097C6544923F1BB0EE711A23D9D18B7D23071997AB9F7
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/oio/blob/indexer.py", line 65, in safe_update_index
self.update_index(path)
File "/usr/lib/python2.7/dist-packages/oio/blob/indexer.py", line 132, in update_index
**data)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 298, in chunk_push
json=body, headers=headers)
File "/usr/lib/python2.7/dist-packages/oio/common/utils.py", line 586, in ensure_headers_wrapper
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/utils.py", line 596, in ensure_request_id_wrapper
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 273, in _rdir_request
uri = self._make_uri(action, volume)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 264, in _make_uri
rdir_host = self._get_rdir_addr(volume_id)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 261, in _get_rdir_addr
raise VolumeException('No rdir assigned to volume %s' % volume_id)
VolumeException: No rdir assigned to volume 172.18.0.1:6004
|
VolumeException
|
def run(self, *args, **kwargs):
time.sleep(random() * self.interval)
while True:
pre = time.time()
try:
self.index_pass()
except VolumeException as exc:
self.logger.error("Cannot index chunks, will retry later: %s", exc)
except Exception as exc:
self.logger.exception("ERROR during indexing: %s", exc)
else:
self.passes += 1
elapsed = (time.time() - pre) or 0.000001
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
|
def run(self, *args, **kwargs):
time.sleep(random() * self.interval)
while True:
pre = time.time()
try:
self.index_pass()
except Exception as e:
self.logger.exception("ERROR during indexing: %s" % e)
else:
self.passes += 1
elapsed = (time.time() - pre) or 0.000001
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
|
https://github.com/open-io/oio-sds/issues/1512
|
11573 7F920C7A0730 log ERROR ERROR while updating /var/lib/oio/sds/OPENIO/rawx-0/7BD/7BDE49669C6616A0656097C6544923F1BB0EE711A23D9D18B7D23071997AB9F7
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/oio/blob/indexer.py", line 65, in safe_update_index
self.update_index(path)
File "/usr/lib/python2.7/dist-packages/oio/blob/indexer.py", line 132, in update_index
**data)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 298, in chunk_push
json=body, headers=headers)
File "/usr/lib/python2.7/dist-packages/oio/common/utils.py", line 586, in ensure_headers_wrapper
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/utils.py", line 596, in ensure_request_id_wrapper
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 273, in _rdir_request
uri = self._make_uri(action, volume)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 264, in _make_uri
rdir_host = self._get_rdir_addr(volume_id)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 261, in _get_rdir_addr
raise VolumeException('No rdir assigned to volume %s' % volume_id)
VolumeException: No rdir assigned to volume 172.18.0.1:6004
|
VolumeException
|
def object_truncate(self, account, container, obj, version=None, size=None, **kwargs):
"""
Truncate object at specified size. Only shrink is supported.
A download may occur if size is not on chunk boundaries.
:param account: name of the account in which the object is stored
:param container: name of the container in which the object is stored
:param obj: name of the object to query
:param version: version of the object to query
:param size: new size of object
"""
# code copied from object_fetch (should be factorized !)
meta, raw_chunks = self.object_locate(
account, container, obj, version=version, properties=False, **kwargs
)
chunk_method = meta["chunk_method"]
storage_method = STORAGE_METHODS.load(chunk_method)
chunks = _sort_chunks(raw_chunks, storage_method.ec)
for pos in sorted(chunks.keys()):
chunk = chunks[pos][0]
if size >= chunk["offset"] and size <= chunk["offset"] + chunk["size"]:
break
else:
raise exc.OioException("No chunk found at position %d" % size)
if chunk["offset"] != size:
# retrieve partial chunk
ret = self.object_fetch(
account,
container,
obj,
version=version,
ranges=[(chunk["offset"], size - 1)],
)
# TODO implement a proper object_update
pos = int(chunk["pos"].split(".")[0])
self.object_create(
account,
container,
obj_name=obj,
data=ret[1],
meta_pos=pos,
content_id=meta["id"],
)
return self.container.content_truncate(
account, container, obj, version=version, size=size, **kwargs
)
|
def object_truncate(self, account, container, obj, version=None, size=None, **kwargs):
"""
Truncate object at specified size. Only shrink is supported.
A download may occur if size is not on chunk boundaries.
:param account: name of the account in which the object is stored
:param container: name of the container in which the object is stored
:param obj: name of the object to query
:param version: version of the object to query
:param size: new size of object
"""
# code copied from object_fetch (should be factorized !)
meta, raw_chunks = self.object_locate(
account, container, obj, version=version, **kwargs
)
chunk_method = meta["chunk_method"]
storage_method = STORAGE_METHODS.load(chunk_method)
chunks = _sort_chunks(raw_chunks, storage_method.ec)
for pos in sorted(chunks.keys()):
chunk = chunks[pos][0]
if size >= chunk["offset"] and size <= chunk["offset"] + chunk["size"]:
break
else:
raise exc.OioException("No chunk found at position %d" % size)
if chunk["offset"] != size:
# retrieve partial chunk
ret = self.object_fetch(
account,
container,
obj,
version=version,
ranges=[(chunk["offset"], size - 1)],
)
# TODO implement a proper object_update
pos = int(chunk["pos"].split(".")[0])
self.object_create(
account,
container,
obj_name=obj,
data=ret[1],
meta_pos=pos,
content_id=meta["id"],
)
return self.container.content_truncate(
account, container, obj, version=version, size=size, **kwargs
)
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def object_locate(
self, account, container, obj, version=None, properties=True, **kwargs
):
"""
Get a description of the object along with the list of its chunks.
:param account: name of the account in which the object is stored
:param container: name of the container in which the object is stored
:param obj: name of the object to query
:param version: version of the object to query
:param properties: should the request return object properties
along with content description
:type properties: `bool`
:returns: a tuple with object metadata `dict` as first element
and chunk `list` as second element
"""
obj_meta, chunks = self.container.content_locate(
account, container, obj, version=version, properties=properties, **kwargs
)
return obj_meta, chunks
|
def object_locate(self, account, container, obj, version=None, **kwargs):
"""
Get a description of the object along with the list of its chunks.
:param account: name of the account in which the object is stored
:param container: name of the container in which the object is stored
:param obj: name of the object to query
:param version: version of the object to query
:returns: a tuple with object metadata `dict` as first element
and chunk `list` as second element
"""
obj_meta, chunks = self.container.content_locate(
account, container, obj, version=version, **kwargs
)
return obj_meta, chunks
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def object_fetch(
self, account, container, obj, version=None, ranges=None, key_file=None, **kwargs
):
"""
Download an object.
:param account: name of the account in which the object is stored
:param container: name of the container in which the object is stored
:param obj: name of the object to fetch
:param version: version of the object to fetch
:type version: `str`
:param ranges: a list of object ranges to download
:type ranges: `list` of `tuple`
:param key_file: path to the file containing credentials
:keyword properties: should the request return object properties
along with content description (True by default)
:type properties: `bool`
:returns: a dictionary of object metadata and
a stream of object data
:rtype: tuple
"""
meta, raw_chunks = self.object_locate(
account, container, obj, version=version, **kwargs
)
chunk_method = meta["chunk_method"]
storage_method = STORAGE_METHODS.load(chunk_method)
chunks = _sort_chunks(raw_chunks, storage_method.ec)
meta["container_id"] = name2cid(account, container).upper()
meta["ns"] = self.namespace
self._patch_timeouts(kwargs)
if storage_method.ec:
stream = fetch_stream_ec(chunks, ranges, storage_method, **kwargs)
elif storage_method.backblaze:
stream = self._fetch_stream_backblaze(
meta, chunks, ranges, storage_method, key_file, **kwargs
)
else:
stream = fetch_stream(chunks, ranges, storage_method, **kwargs)
return meta, stream
|
def object_fetch(
self, account, container, obj, version=None, ranges=None, key_file=None, **kwargs
):
meta, raw_chunks = self.object_locate(
account, container, obj, version=version, **kwargs
)
chunk_method = meta["chunk_method"]
storage_method = STORAGE_METHODS.load(chunk_method)
chunks = _sort_chunks(raw_chunks, storage_method.ec)
meta["container_id"] = name2cid(account, container).upper()
meta["ns"] = self.namespace
self._patch_timeouts(kwargs)
if storage_method.ec:
stream = fetch_stream_ec(chunks, ranges, storage_method, **kwargs)
elif storage_method.backblaze:
stream = self._fetch_stream_backblaze(
meta, chunks, ranges, storage_method, key_file, **kwargs
)
else:
stream = fetch_stream(chunks, ranges, storage_method, **kwargs)
return meta, stream
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def chunk_audit(self, path):
with open(path) as f:
try:
meta = read_chunk_metadata(f)
except exc.MissingAttribute as e:
raise exc.FaultyChunk("Missing extended attribute %s" % e)
size = int(meta["chunk_size"])
md5_checksum = meta["chunk_hash"].lower()
reader = ChunkReader(f, size, md5_checksum)
with closing(reader):
for buf in reader:
buf_len = len(buf)
self.bytes_running_time = ratelimit(
self.bytes_running_time,
self.max_bytes_per_second,
increment=buf_len,
)
self.bytes_processed += buf_len
self.total_bytes_processed += buf_len
try:
container_id = meta["container_id"]
content_path = meta["content_path"]
_obj_meta, data = self.container_client.content_locate(
cid=container_id, path=content_path, properties=False
)
# Check chunk data
chunk_data = None
metachunks = set()
for c in data:
if c["url"].endswith(meta["chunk_id"]):
metachunks.add(c["pos"].split(".", 2)[0])
chunk_data = c
if not chunk_data:
raise exc.OrphanChunk("Not found in content")
if chunk_data["size"] != int(meta["chunk_size"]):
raise exc.FaultyChunk("Invalid chunk size found")
if chunk_data["hash"] != meta["chunk_hash"]:
raise exc.FaultyChunk("Invalid chunk hash found")
if chunk_data["pos"] != meta["chunk_pos"]:
raise exc.FaultyChunk("Invalid chunk position found")
except exc.NotFound:
raise exc.OrphanChunk("Chunk not found in container")
|
def chunk_audit(self, path):
with open(path) as f:
try:
meta = read_chunk_metadata(f)
except exc.MissingAttribute as e:
raise exc.FaultyChunk("Missing extended attribute %s" % e)
size = int(meta["chunk_size"])
md5_checksum = meta["chunk_hash"].lower()
reader = ChunkReader(f, size, md5_checksum)
with closing(reader):
for buf in reader:
buf_len = len(buf)
self.bytes_running_time = ratelimit(
self.bytes_running_time,
self.max_bytes_per_second,
increment=buf_len,
)
self.bytes_processed += buf_len
self.total_bytes_processed += buf_len
try:
container_id = meta["container_id"]
content_path = meta["content_path"]
content_attr, data = self.container_client.content_locate(
cid=container_id, path=content_path
)
# Check chunk data
chunk_data = None
metachunks = set()
for c in data:
if c["url"].endswith(meta["chunk_id"]):
metachunks.add(c["pos"].split(".", 2)[0])
chunk_data = c
if not chunk_data:
raise exc.OrphanChunk("Not found in content")
if chunk_data["size"] != int(meta["chunk_size"]):
raise exc.FaultyChunk("Invalid chunk size found")
if chunk_data["hash"] != meta["chunk_hash"]:
raise exc.FaultyChunk("Invalid chunk hash found")
if chunk_data["pos"] != meta["chunk_pos"]:
raise exc.FaultyChunk("Invalid chunk position found")
except exc.NotFound:
raise exc.OrphanChunk("Chunk not found in container")
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def take_action(self, parsed_args):
import os
self.log.debug("take_action(%s)", parsed_args)
account = self.app.client_manager.get_account()
container = parsed_args.container
objs = self.app.client_manager.storage.object_list(account, container)
for obj in objs["objects"]:
obj_name = obj["name"]
_, stream = self.app.client_manager.storage.object_fetch(
account, container, obj_name, properties=False
)
if not os.path.exists(os.path.dirname(obj_name)):
if len(os.path.dirname(obj_name)) > 0:
os.makedirs(os.path.dirname(obj_name))
with open(obj_name, "wb") as f:
for chunk in stream:
f.write(chunk)
|
def take_action(self, parsed_args):
import os
self.log.debug("take_action(%s)", parsed_args)
account = self.app.client_manager.get_account()
container = parsed_args.container
objs = self.app.client_manager.storage.object_list(account, container)
for obj in objs["objects"]:
obj_name = obj["name"]
_, stream = self.app.client_manager.storage.object_fetch(
account, container, obj_name
)
if not os.path.exists(os.path.dirname(obj_name)):
if len(os.path.dirname(obj_name)) > 0:
os.makedirs(os.path.dirname(obj_name))
with open(obj_name, "wb") as f:
for chunk in stream:
f.write(chunk)
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
super(SaveObject, self).take_action(parsed_args)
container = parsed_args.container
obj = parsed_args.object
key_file = parsed_args.key_file
if key_file and key_file[0] != "/":
key_file = os.getcwd() + "/" + key_file
filename = parsed_args.file
if not filename:
filename = obj
if parsed_args.auto:
container = self.flatns_manager(obj)
_meta, stream = self.app.client_manager.storage.object_fetch(
self.app.client_manager.get_account(),
container,
obj,
key_file=key_file,
properties=False,
)
if not os.path.exists(os.path.dirname(filename)):
if len(os.path.dirname(filename)) > 0:
os.makedirs(os.path.dirname(filename))
with open(filename, "wb") as ofile:
for chunk in stream:
ofile.write(chunk)
|
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
super(SaveObject, self).take_action(parsed_args)
container = parsed_args.container
obj = parsed_args.object
key_file = parsed_args.key_file
if key_file and key_file[0] != "/":
key_file = os.getcwd() + "/" + key_file
filename = parsed_args.file
if not filename:
filename = obj
if parsed_args.auto:
container = self.flatns_manager(obj)
meta, stream = self.app.client_manager.storage.object_fetch(
self.app.client_manager.get_account(), container, obj, key_file=key_file
)
if not os.path.exists(os.path.dirname(filename)):
if len(os.path.dirname(filename)) > 0:
os.makedirs(os.path.dirname(filename))
with open(filename, "wb") as ofile:
for chunk in stream:
ofile.write(chunk)
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
super(LocateObject, self).take_action(parsed_args)
account = self.app.client_manager.get_account()
container = parsed_args.container
obj = parsed_args.object
if parsed_args.auto:
container = self.flatns_manager(obj)
data = self.app.client_manager.storage.object_locate(
account, container, obj, version=parsed_args.object_version, properties=False
)
def sort_chunk_pos(c1, c2):
c1_tokens = c1[0].split(".")
c2_tokens = c2[0].split(".")
c1_pos = int(c1_tokens[0])
c2_pos = int(c2_tokens[0])
if len(c1_tokens) == 1 or c1_pos != c2_pos:
return c1_pos - c2_pos
return cmp(c1[0], c2[0])
def get_chunks_info(chunks):
pool_manager = get_pool_manager()
chunk_hash = ""
chunk_size = ""
for c in chunks:
resp = pool_manager.request("HEAD", c["url"])
if resp.status != 200:
chunk_size = "%d %s" % (resp.status, resp.reason)
chunk_hash = "%d %s" % (resp.status, resp.reason)
else:
chunk_size = resp.headers.get(
"X-oio-chunk-meta-chunk-size", "Missing chunk size header"
)
chunk_hash = resp.headers.get(
"X-oio-chunk-meta-chunk-hash", "Missing chunk hash header"
)
yield (c["pos"], c["url"], c["size"], c["hash"], chunk_size, chunk_hash)
columns = ()
chunks = []
if parsed_args.chunk_info:
columns = (
"Pos",
"Id",
"Metachunk size",
"Metachunk hash",
"Chunk size",
"Chunk hash",
)
chunks = get_chunks_info(data[1])
else:
columns = ("Pos", "Id", "Metachunk size", "Metachunk hash")
chunks = ((c["pos"], c["url"], c["size"], c["hash"]) for c in data[1])
return columns, sorted(chunks, cmp=sort_chunk_pos)
|
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
super(LocateObject, self).take_action(parsed_args)
account = self.app.client_manager.get_account()
container = parsed_args.container
obj = parsed_args.object
if parsed_args.auto:
container = self.flatns_manager(obj)
data = self.app.client_manager.storage.object_analyze(
account, container, obj, version=parsed_args.object_version
)
def sort_chunk_pos(c1, c2):
c1_tokens = c1[0].split(".")
c2_tokens = c2[0].split(".")
c1_pos = int(c1_tokens[0])
c2_pos = int(c2_tokens[0])
if len(c1_tokens) == 1 or c1_pos != c2_pos:
return c1_pos - c2_pos
return cmp(c1[0], c2[0])
def get_chunks_info(chunks):
pool_manager = get_pool_manager()
chunk_hash = ""
chunk_size = ""
for c in chunks:
resp = pool_manager.request("HEAD", c["url"])
if resp.status != 200:
chunk_size = "%d %s" % (resp.status, resp.reason)
chunk_hash = "%d %s" % (resp.status, resp.reason)
else:
chunk_size = resp.headers.get(
"X-oio-chunk-meta-chunk-size", "Missing chunk size header"
)
chunk_hash = resp.headers.get(
"X-oio-chunk-meta-chunk-hash", "Missing chunk hash header"
)
yield (c["pos"], c["url"], c["size"], c["hash"], chunk_size, chunk_hash)
columns = ()
chunks = []
if parsed_args.chunk_info:
columns = (
"Pos",
"Id",
"Metachunk size",
"Metachunk hash",
"Chunk size",
"Chunk hash",
)
chunks = get_chunks_info(data[1])
else:
columns = ("Pos", "Id", "Metachunk size", "Metachunk hash")
chunks = ((c["pos"], c["url"], c["size"], c["hash"]) for c in data[1])
return columns, sorted(chunks, cmp=sort_chunk_pos)
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def compute(self, conn, data=None):
tarinfo = TarInfo()
tarinfo.name = self.name
tarinfo.mod = 0o700
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.type = REGTYPE
tarinfo.linkname = ""
if self.name == CONTAINER_PROPERTIES:
meta = data or conn.container_get_properties(self.acct, self.ref)
tarinfo.size = len(json.dumps(meta["properties"], sort_keys=True))
self._filesize = tarinfo.size
self._buf = tarinfo.tobuf(format=PAX_FORMAT)
return
elif self.name == CONTAINER_MANIFEST:
tarinfo.size = len(json.dumps(data, sort_keys=True))
self._filesize = tarinfo.size
self._buf = tarinfo.tobuf(format=PAX_FORMAT)
return
entry = conn.object_get_properties(self.acct, self.ref, self.name)
properties = entry["properties"]
# x-static-large-object
if properties.get(SLO, False):
tarinfo.size = int(properties.get(SLO_SIZE))
_, slo = conn.object_fetch(self.acct, self.ref, self.name, properties=False)
self._slo = json.loads("".join(slo), object_pairs_hook=OrderedDict)
self._checksums = {}
# format MD5 to share same format as multi chunks object
offset = 0
for idx, ck in enumerate(self._slo):
self._checksums[idx] = {
"hash": ck["hash"].upper(),
"size": ck["bytes"],
"offset": offset,
}
offset += ck["bytes"]
else:
tarinfo.size = int(entry["length"])
meta, chunks = conn.object_locate(
self.acct, self.ref, self.name, properties=False
)
storage_method = STORAGE_METHODS.load(meta["chunk_method"])
chunks = _sort_chunks(chunks, storage_method.ec)
for idx in chunks:
chunks[idx] = chunks[idx][0]
del chunks[idx]["url"]
del chunks[idx]["score"]
del chunks[idx]["pos"]
self._checksums = chunks
self._filesize = tarinfo.size
# XATTR
# do we have to store basic properties like policy, ... ?
for key, val in properties.items():
assert isinstance(val, basestring), "Invalid type for %s:%s:%s" % (
self.acct,
self.name,
key,
)
if self.slo and key in SLO_HEADERS:
continue
tarinfo.pax_headers[SCHILY + key] = val
tarinfo.pax_headers["mime_type"] = entry["mime_type"]
self._buf = tarinfo.tobuf(format=PAX_FORMAT)
|
def compute(self, conn, data=None):
tarinfo = TarInfo()
tarinfo.name = self.name
tarinfo.mod = 0o700
tarinfo.uid = 0
tarinfo.gid = 0
tarinfo.type = REGTYPE
tarinfo.linkname = ""
if self.name == CONTAINER_PROPERTIES:
meta = data or conn.container_get_properties(self.acct, self.ref)
tarinfo.size = len(json.dumps(meta["properties"], sort_keys=True))
self._filesize = tarinfo.size
self._buf = tarinfo.tobuf(format=PAX_FORMAT)
return
elif self.name == CONTAINER_MANIFEST:
tarinfo.size = len(json.dumps(data, sort_keys=True))
self._filesize = tarinfo.size
self._buf = tarinfo.tobuf(format=PAX_FORMAT)
return
entry = conn.object_get_properties(self.acct, self.ref, self.name)
properties = entry["properties"]
# x-static-large-object
if properties.get(SLO, False):
tarinfo.size = int(properties.get(SLO_SIZE))
_, slo = conn.object_fetch(self.acct, self.ref, self.name)
self._slo = json.loads("".join(slo), object_pairs_hook=OrderedDict)
self._checksums = {}
# format MD5 to share same format as multi chunks object
offset = 0
for idx, ck in enumerate(self._slo):
self._checksums[idx] = {
"hash": ck["hash"].upper(),
"size": ck["bytes"],
"offset": offset,
}
offset += ck["bytes"]
else:
tarinfo.size = int(entry["length"])
meta, chunks = conn.object_locate(self.acct, self.ref, self.name)
storage_method = STORAGE_METHODS.load(meta["chunk_method"])
chunks = _sort_chunks(chunks, storage_method.ec)
for idx in chunks:
chunks[idx] = chunks[idx][0]
del chunks[idx]["url"]
del chunks[idx]["score"]
del chunks[idx]["pos"]
self._checksums = chunks
self._filesize = tarinfo.size
# XATTR
# do we have to store basic properties like policy, ... ?
for key, val in properties.items():
assert isinstance(val, basestring), "Invalid type for %s:%s:%s" % (
self.acct,
self.name,
key,
)
if self.slo and key in SLO_HEADERS:
continue
tarinfo.pax_headers[SCHILY + key] = val
tarinfo.pax_headers["mime_type"] = entry["mime_type"]
self._buf = tarinfo.tobuf(format=PAX_FORMAT)
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def create_tar_oio_stream(self, entry, range_):
"""Extract data from entry from object"""
mem = ""
name = entry["name"]
if range_[0] < entry["hdr_blocks"]:
tar = OioTarEntry(self.storage, self.acct, self.container, name)
for bl in xrange(entry["hdr_blocks"]):
if bl >= range_[0] and bl <= range_[1]:
mem += tar.buf[bl * BLOCKSIZE : bl * BLOCKSIZE + BLOCKSIZE]
range_ = (entry["hdr_blocks"], range_[1])
if range_[0] > range_[1]:
return mem
# for sanity, shift ranges
range_ = (range_[0] - entry["hdr_blocks"], range_[1] - entry["hdr_blocks"])
# compute needed padding data
nb_blocks, remainder = divmod(entry["size"], BLOCKSIZE)
start = range_[0] * BLOCKSIZE
last = False
if remainder > 0 and nb_blocks == range_[1]:
last = True
end = entry["size"] - 1
else:
end = range_[1] * BLOCKSIZE + BLOCKSIZE - 1
if entry["slo"]:
# we have now to compute which block(s) we need to read
slo_start = 0
for part in entry["slo"]:
if start > part["bytes"]:
start -= part["bytes"]
end -= part["bytes"]
continue
slo_end = min(end, part["bytes"])
slo_start = start
cnt, path = part["name"].strip("/").split("/", 1)
_, data = self.storage.object_fetch(
self.acct, cnt, path, ranges=[(slo_start, slo_end)], properties=False
)
mem += "".join(data)
start = max(0, start - part["bytes"])
end -= part["bytes"]
if end <= 0:
break
else:
_, data = self.storage.object_fetch(
self.acct, self.container, name, ranges=[(start, end)], properties=False
)
mem += "".join(data)
if last:
mem += NUL * (BLOCKSIZE - remainder)
if not mem:
self.logger.error("no data extracted")
if divmod(len(mem), BLOCKSIZE)[1]:
self.logger.error("data written does not match blocksize")
return mem
|
def create_tar_oio_stream(self, entry, range_):
"""Extract data from entry from object"""
mem = ""
name = entry["name"]
if range_[0] < entry["hdr_blocks"]:
tar = OioTarEntry(self.storage, self.acct, self.container, name)
for bl in xrange(entry["hdr_blocks"]):
if bl >= range_[0] and bl <= range_[1]:
mem += tar.buf[bl * BLOCKSIZE : bl * BLOCKSIZE + BLOCKSIZE]
range_ = (entry["hdr_blocks"], range_[1])
if range_[0] > range_[1]:
return mem
# for sanity, shift ranges
range_ = (range_[0] - entry["hdr_blocks"], range_[1] - entry["hdr_blocks"])
# compute needed padding data
nb_blocks, remainder = divmod(entry["size"], BLOCKSIZE)
start = range_[0] * BLOCKSIZE
last = False
if remainder > 0 and nb_blocks == range_[1]:
last = True
end = entry["size"] - 1
else:
end = range_[1] * BLOCKSIZE + BLOCKSIZE - 1
if entry["slo"]:
# we have now to compute which block(s) we need to read
slo_start = 0
for part in entry["slo"]:
if start > part["bytes"]:
start -= part["bytes"]
end -= part["bytes"]
continue
slo_end = min(end, part["bytes"])
slo_start = start
cnt, path = part["name"].strip("/").split("/", 1)
_, data = self.storage.object_fetch(
self.acct, cnt, path, ranges=[(slo_start, slo_end)]
)
mem += "".join(data)
start = max(0, start - part["bytes"])
end -= part["bytes"]
if end <= 0:
break
else:
_, data = self.storage.object_fetch(
self.acct, self.container, name, ranges=[(start, end)]
)
mem += "".join(data)
if last:
mem += NUL * (BLOCKSIZE - remainder)
if not mem:
self.logger.error("no data extracted")
if divmod(len(mem), BLOCKSIZE)[1]:
self.logger.error("data written does not match blocksize")
return mem
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def content_locate(
self,
account=None,
reference=None,
path=None,
cid=None,
content=None,
version=None,
properties=True,
**kwargs,
):
"""
Get a description of the content along with the list of its chunks.
:param cid: container id that can be used in place of `account`
and `reference`
:type cid: hexadecimal `str`
:param content: content id that can be used in place of `path`
:type content: hexadecimal `str`
:param properties: should the request return object properties
along with content description
:type properties: `bool`
:returns: a tuple with content metadata `dict` as first element
and chunk `list` as second element
"""
uri = self._make_uri("content/locate")
params = self._make_params(
account, reference, path, cid=cid, content=content, version=version
)
params["properties"] = properties
try:
resp, chunks = self._direct_request("GET", uri, params=params, **kwargs)
content_meta = extract_content_headers_meta(resp.headers)
except exceptions.OioNetworkException as exc:
# TODO(FVE): this special behavior can be removed when
# the 'content/locate' protocol is changed to include
# object properties in the response body instead of headers.
if properties and "got more than 100 headers" in str(exc):
params["properties"] = False
_resp, chunks = self._direct_request("GET", uri, params=params, **kwargs)
content_meta = self.content_get_properties(
account,
reference,
path,
cid=cid,
content=content,
version=version,
**kwargs,
)
else:
raise
return content_meta, chunks
|
def content_locate(
self,
account=None,
reference=None,
path=None,
cid=None,
content=None,
version=None,
**kwargs,
):
"""
Get a description of the content along with the list of its chunks.
:param cid: container id that can be used in place of `account`
and `reference`
:type cid: hexadecimal `str`
:param content: content id that can be used in place of `path`
:type content: hexadecimal `str`
:returns: a tuple with content metadata `dict` as first element
and chunk `list` as second element
"""
uri = self._make_uri("content/locate")
params = self._make_params(
account, reference, path, cid=cid, content=content, version=version
)
resp, chunks = self._direct_request("GET", uri, params=params, **kwargs)
# FIXME(FVE): see extract_content_headers_meta() code
content_meta = extract_content_headers_meta(resp.headers)
return content_meta, chunks
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def check_obj(self, target, recurse=False):
account = target.account
container = target.container
obj = target.obj
if (account, container, obj) in self.running:
self.running[(account, container, obj)].wait()
if (account, container, obj) in self.list_cache:
return self.list_cache[(account, container, obj)]
self.running[(account, container, obj)] = Event()
print('Checking object "%s"' % target)
container_listing, ct_meta = self.check_container(target)
error = False
if obj not in container_listing:
print(" Object %s missing from container listing" % target)
error = True
# checksum = None
else:
# TODO check checksum match
# checksum = container_listing[obj]['hash']
pass
results = []
meta = dict()
try:
meta, results = self.container_client.content_locate(
account=account, reference=container, path=obj, properties=False
)
except exc.NotFound as e:
self.object_not_found += 1
error = True
print(' Not found object "%s": %s' % (target, str(e)))
except Exception as e:
self.object_exceptions += 1
error = True
print(' Exception object "%s": %s' % (target, str(e)))
chunk_listing = dict()
for chunk in results:
chunk_listing[chunk["url"]] = chunk
self.check_obj_policy(target.copy(), meta, results)
self.objects_checked += 1
self.list_cache[(account, container, obj)] = (chunk_listing, meta)
self.running[(account, container, obj)].send(True)
del self.running[(account, container, obj)]
if recurse:
for chunk in chunk_listing:
t = target.copy()
t.chunk = chunk
self.pool.spawn_n(self.check_chunk, t)
if error and self.error_file:
self.write_error(target)
return chunk_listing, meta
|
def check_obj(self, target, recurse=False):
account = target.account
container = target.container
obj = target.obj
if (account, container, obj) in self.running:
self.running[(account, container, obj)].wait()
if (account, container, obj) in self.list_cache:
return self.list_cache[(account, container, obj)]
self.running[(account, container, obj)] = Event()
print('Checking object "%s"' % target)
container_listing, ct_meta = self.check_container(target)
error = False
if obj not in container_listing:
print(" Object %s missing from container listing" % target)
error = True
# checksum = None
else:
# TODO check checksum match
# checksum = container_listing[obj]['hash']
pass
results = []
meta = dict()
try:
meta, results = self.container_client.content_locate(
account=account, reference=container, path=obj
)
except exc.NotFound as e:
self.object_not_found += 1
error = True
print(' Not found object "%s": %s' % (target, str(e)))
except Exception as e:
self.object_exceptions += 1
error = True
print(' Exception object "%s": %s' % (target, str(e)))
chunk_listing = dict()
for chunk in results:
chunk_listing[chunk["url"]] = chunk
self.check_obj_policy(target.copy(), meta, results)
self.objects_checked += 1
self.list_cache[(account, container, obj)] = (chunk_listing, meta)
self.running[(account, container, obj)].send(True)
del self.running[(account, container, obj)]
if recurse:
for chunk in chunk_listing:
t = target.copy()
t.chunk = chunk
self.pool.spawn_n(self.check_chunk, t)
if error and self.error_file:
self.write_error(target)
return chunk_listing, meta
|
https://github.com/open-io/oio-sds/issues/1369
|
Checking object "account=AUTH_demo, container=testing, obj=magic"
Checking container "account=AUTH_demo, container=testing, obj=magic"
Checking account "account=AUTH_demo, container=testing, obj=magic"
Exception object "account=AUTH_demo, container=testing, obj=magic": ('reqid=None', 'Connection aborted.', HTTPException('got more than 100 headers',))
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/eventlet/greenpool.py", line 82, in _spawn_n_impl
func(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 244, in check_obj
self.check_obj_policy(target.copy(), meta, results)
File "/usr/lib/python2.7/site-packages/oio/crawler/integrity.py", line 185, in check_obj_policy
stg_met = STORAGE_METHODS.load(obj_meta['chunk_method'])
KeyError: 'chunk_method'
Report
Accounts checked : 1
Containers checked: 1
Objects checked : 0
Exceptions : 1
Chunks checked : 0
|
KeyError
|
def init(self):
eventlet.monkey_patch(os=False)
self.tube = self.conf.get("tube", DEFAULT_TUBE)
self.session = requests.Session()
self.cs = ConscienceClient(self.conf)
self.rdir = RdirClient(self.conf)
self._acct_addr = None
self.acct_update = 0
self.graceful_timeout = 1
self.acct_refresh_interval = int_value(self.conf.get("acct_refresh_interval"), 60)
self.acct_update = true_value(self.conf.get("acct_update", True))
self.rdir_update = true_value(self.conf.get("rdir_update", True))
if "handlers_conf" not in self.conf:
raise ValueError("'handlers_conf' path not defined in conf")
self.handlers = loadhandlers(
self.conf.get("handlers_conf"), global_conf=self.conf, app=self
)
super(EventWorker, self).init()
|
def init(self):
eventlet.monkey_patch(os=False)
self.tube = self.conf.get("tube", DEFAULT_TUBE)
self.session = requests.Session()
self.cs = ConscienceClient(self.conf)
self.rdir = RdirClient(self.conf)
self._acct_addr = None
self.acct_update = 0
self.graceful_timeout = 1
self.acct_refresh_interval = int_value(self.conf.get("acct_refresh_interval"), 60)
self.acct_update = true_value(self.conf.get("acct_update", True))
self.rdir_update = true_value(self.conf.get("rdir_update", True))
if "handlers_conf" not in self.conf:
raise ValueError("'handlers_conf' path not defined in conf")
self.handlers = loadhandlers(
self.conf.get("handlers_conf"), evt_types, global_conf=self.conf, app=self
)
super(EventWorker, self).init()
|
https://github.com/open-io/oio-sds/issues/882
|
Traceback (most recent call last):
File "/home/fvennetier/src/public_git/oio-sds/oio/event/agent.py", line 166, in spawn_worker
worker.init()
File "/home/fvennetier/src/public_git/oio-sds/oio/event/consumer.py", line 144, in init
app=self)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 72, in loadhandlers
for name in names
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 72, in <genexpr>
for name in names
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 78, in loadhandler
context = loader.get_context(HANDLER, name, global_conf)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 137, in get_context
obj_type, name=name)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 206, in find_config_section
'No section %r found in config %r' % (name, self.filename))
LookupError: No section 'account.services' found in config '/home/fvennetier/.oio/sds/conf/event-handlers.conf'
|
LookupError
|
def loadhandlers(path, global_conf=None, **kwargs):
loader = ConfigLoader(path)
handlers = {}
handlers.update(
(name[8:], loadhandler(loader, name[8:], global_conf, **kwargs))
for name in loader.get_sections(prefix="handler")
)
return handlers
|
def loadhandlers(path, names, global_conf=None, **kwargs):
loader = ConfigLoader(path)
handlers = {}
handlers.update(
(name, loadhandler(loader, name, global_conf, **kwargs)) for name in names
)
return handlers
|
https://github.com/open-io/oio-sds/issues/882
|
Traceback (most recent call last):
File "/home/fvennetier/src/public_git/oio-sds/oio/event/agent.py", line 166, in spawn_worker
worker.init()
File "/home/fvennetier/src/public_git/oio-sds/oio/event/consumer.py", line 144, in init
app=self)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 72, in loadhandlers
for name in names
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 72, in <genexpr>
for name in names
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 78, in loadhandler
context = loader.get_context(HANDLER, name, global_conf)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 137, in get_context
obj_type, name=name)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/loader.py", line 206, in find_config_section
'No section %r found in config %r' % (name, self.filename))
LookupError: No section 'account.services' found in config '/home/fvennetier/.oio/sds/conf/event-handlers.conf'
|
LookupError
|
def __init__(self, namespace, session=None, **kwargs):
self.conf = {"namespace": namespace}
self.conf.update(kwargs)
self._volume = None
self._event = None
self._cluster = None
self._meta0 = None
self.session = session
|
def __init__(self, namespace, endpoint=None, session=None):
self.conf = {"namespace": namespace}
self._volume = None
self._event = None
self._cluster = None
self._meta0 = None
self.session = session
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def make_client(instance):
"""
Build an AdminClient that will be added as "admin"
field of `instance`.
:param instance: an instance of ClientManager
:returns: an instance of AdminClient
"""
client = AdminClient(**instance.get_process_configuration())
return client
|
def make_client(instance):
"""
Build an AdminClient that will be added as "admin"
field of `instance`.
:param instance: an instance of ClientManager
:returns: an instance of AdminClient
"""
endpoint = instance.get_endpoint("admin")
client = AdminClient(
session=instance.session, endpoint=endpoint, namespace=instance.namespace
)
return client
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def __get__(self, instance, owner):
instance.setup()
if self._handle is None:
self._handle = self.factory(instance)
return self._handle
|
def __get__(self, instance, owner):
if self._handle is None:
self._handle = self.factory(instance)
return self._handle
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def setup(self):
if not self.setup_done:
if not self._options.get("namespace", None):
msg = "Set a namespace with --oio-ns, OIO_NS\n"
raise exceptions.CommandError("Missing parameter: \n%s" % msg)
self.namespace = self._options["namespace"]
sds_conf = load_namespace_conf(self.namespace) or {}
if not self._options.get("proxyd_url") and "proxy" in sds_conf:
proxyd_url = "http://%s" % sds_conf.get("proxy")
self._options["proxyd_url"] = proxyd_url
validate_options(self._options)
LOG.debug("Using parameters %s" % self._options)
self.setup_done = True
self._admin_mode = self._options.get("admin_mode")
if "meta1_digits" in sds_conf:
self._meta1_digits = int(sds_conf["meta1_digits"])
self._options["log_level"] = logging.getLevelName(LOG.getEffectiveLevel())
|
def setup(self):
if not self.setup_done:
if not self._options.get("namespace", None):
msg = "Set a namespace with --oio-ns, OIO_NS\n"
raise exceptions.CommandError("Missing parameter: \n%s" % msg)
self.namespace = self._options["namespace"]
sds_conf = load_namespace_conf(self.namespace) or {}
if not self._options.get("proxyd_url") and "proxy" in sds_conf:
proxyd_url = "http://%s" % sds_conf.get("proxy")
self._options["proxyd_url"] = proxyd_url
validate_options(self._options)
LOG.debug("Using parameters %s" % self._options)
self.setup_done = True
self._admin_mode = self._options.get("admin_mode")
if "meta1_digits" in sds_conf:
self._meta1_digits = int(sds_conf["meta1_digits"])
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def build_option_parser(self, description, version):
parser = super(OpenIOShell, self).build_option_parser(description, version)
parser.add_argument(
"--oio-ns",
metavar="<namespace>",
dest="ns",
default=utils.env("OIO_NS"),
help="Namespace name (Env: OIO_NS)",
)
parser.add_argument(
"--oio-account",
metavar="<account>",
dest="account_name",
default=utils.env("OIO_ACCOUNT"),
help="Account name (Env: OIO_ACCOUNT)",
)
parser.add_argument(
"--oio-proxyd-url",
metavar="<proxyd url>",
dest="proxyd_url",
default=utils.env("OIO_PROXYD_URL"),
help="Proxyd URL (Env: OIO_PROXYD_URL)",
)
parser.add_argument(
"--admin",
dest="admin_mode",
action="store_true",
help="Add 'admin mode' flag to all proxy requests",
)
return clientmanager.build_plugin_option_parser(parser)
|
def build_option_parser(self, description, version):
parser = super(OpenIOShell, self).build_option_parser(description, version)
parser.add_argument(
"--oio-ns",
metavar="<namespace>",
dest="ns",
default=utils.env("OIO_NS"),
help="Namespace name (Env: OIO_NS)",
)
parser.add_argument(
"--oio-account",
metavar="<account>",
dest="account_name",
default=utils.env("OIO_ACCOUNT"),
help="Account name (Env: OIO_ACCOUNT)",
)
parser.add_argument(
"--oio-proxyd-url",
metavar="<proxyd url>",
dest="proxyd_url",
default=utils.env("OIO_PROXYD_URL"),
help="Proxyd URL (Env: OIO_PROXYD_URL)",
)
parser.add_argument(
"--admin",
dest="admin_mode",
action="store_true",
help="passing commands into admin mode",
)
return clientmanager.build_plugin_option_parser(parser)
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def initialize_app(self, argv):
super(OpenIOShell, self).initialize_app(argv)
for module in clientmanager.PLUGIN_MODULES:
api = module.API_NAME
cmd_group = "openio." + api.replace("-", "_")
self.command_manager.add_command_group(cmd_group)
self.log.debug("%s API: cmd group %s" % (api, cmd_group))
self.command_manager.add_command_group("openio.common")
self.command_manager.add_command_group("openio.ext")
options = {
"namespace": self.options.ns,
"account_name": self.options.account_name,
"proxyd_url": self.options.proxyd_url,
"admin_mode": self.options.admin_mode,
"log_level": logging.getLevelName(logging.getLogger("").getEffectiveLevel()),
"is_cli": True,
}
self.print_help_if_requested()
self.client_manager = clientmanager.ClientManager(options)
|
def initialize_app(self, argv):
super(OpenIOShell, self).initialize_app(argv)
for module in clientmanager.PLUGIN_MODULES:
api = module.API_NAME
cmd_group = "openio." + api.replace("-", "_")
self.command_manager.add_command_group(cmd_group)
self.log.debug("%s API: cmd group %s" % (api, cmd_group))
self.command_manager.add_command_group("openio.common")
self.command_manager.add_command_group("openio.ext")
options = {
"namespace": self.options.ns,
"account_name": self.options.account_name,
"proxyd_url": self.options.proxyd_url,
"admin_mode": self.options.admin_mode,
}
self.print_help_if_requested()
self.client_manager = clientmanager.ClientManager(options)
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def get_logger(
conf,
name=None,
verbose=False,
fmt="%(process)d %(thread)X %(name)s %(levelname)s %(message)s",
):
if not conf:
conf = {}
if name is None:
name = "log"
logger = logging.getLogger(name)
logger.propagate = False
syslog_prefix = conf.get("syslog_prefix", "")
formatter = logging.Formatter(fmt=fmt)
if syslog_prefix:
fmt = "%s: %s" % (syslog_prefix, fmt)
syslog_formatter = logging.Formatter(fmt=fmt)
if not hasattr(get_logger, "handler4logger"):
get_logger.handler4logger = {}
if logger in get_logger.handler4logger:
logger.removeHandler(get_logger.handler4logger[logger])
facility = getattr(
SysLogHandler, conf.get("log_facility", "LOG_LOCAL0"), SysLogHandler.LOG_LOCAL0
)
log_address = conf.get("log_address", "/dev/log")
try:
handler = SysLogHandler(address=log_address, facility=facility)
except socket.error as exc:
if exc.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise exc
handler = SysLogHandler(facility=facility)
handler.setFormatter(syslog_formatter)
logger.addHandler(handler)
get_logger.handler4logger[logger] = handler
logging_level = getattr(
logging, conf.get("log_level", "INFO").upper(), logging.INFO
)
if (
verbose
or conf.get("is_cli")
or hasattr(get_logger, "console_handler4logger")
or logging_level < logging.INFO
):
if not hasattr(get_logger, "console_handler4logger"):
get_logger.console_handler4logger = {}
if logger in get_logger.console_handler4logger:
logger.removeHandler(get_logger.console_handler4logger[logger])
console_handler = logging.StreamHandler(sys.__stderr__)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
get_logger.console_handler4logger[logger] = console_handler
logger.setLevel(logging_level)
return logger
|
def get_logger(
conf,
name=None,
verbose=False,
fmt="%(process)d %(thread)X %(name)s %(levelname)s %(message)s",
):
if not conf:
conf = {}
if name is None:
name = "log"
logger = logging.getLogger(name)
logger.propagate = False
syslog_prefix = conf.get("syslog_prefix", "")
formatter = logging.Formatter(fmt=fmt)
if syslog_prefix:
fmt = "%s: %s" % (syslog_prefix, fmt)
syslog_formatter = logging.Formatter(fmt=fmt)
if not hasattr(get_logger, "handler4logger"):
get_logger.handler4logger = {}
if logger in get_logger.handler4logger:
logger.removeHandler(get_logger.handler4logger[logger])
facility = getattr(
SysLogHandler, conf.get("log_facility", "LOG_LOCAL0"), SysLogHandler.LOG_LOCAL0
)
log_address = conf.get("log_address", "/dev/log")
try:
handler = SysLogHandler(address=log_address, facility=facility)
except socket.error as e:
if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise e
handler = SysLogHandler(facility=facility)
handler.setFormatter(syslog_formatter)
logger.addHandler(handler)
get_logger.handler4logger[logger] = handler
if verbose or hasattr(get_logger, "console_handler4logger"):
if not hasattr(get_logger, "console_handler4logger"):
get_logger.console_handler4logger = {}
if logger in get_logger.console_handler4logger:
logger.removeHandler(get_logger.console_handler4logger[logger])
console_handler = logging.StreamHandler(sys.__stderr__)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
get_logger.console_handler4logger[logger] = console_handler
logging_level = getattr(
logging, conf.get("log_level", "INFO").upper(), logging.INFO
)
logger.setLevel(logging_level)
return logger
|
https://github.com/open-io/oio-sds/issues/828
|
# openio --oio-ns=VCFR1 directory bootstrap --replicas 3 -v --debug
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=meta1 HTTP/1.1" 200 376
Computing meta1 prefix mapping...
0%
10%
20%
30%
40%
50%
60%
70%
80%
90%
100%
Equilibrating...
META1 Digits = 4
Replicas = 3
Scored positively = 3
Ideal number of prefixes per meta1: 65536
Rebalance moved 0 prefixes
Saving...
Starting new HTTP connection (1): 172.17.25.5
"POST /v3.0/VCFR1/admin/meta0_force HTTP/1.1" 204 0
Assigning rdir services to rawx services...
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/conscience/list?type=rawx HTTP/1.1" 200 256
"GET /v3.0/VCFR1/conscience/list?full=1&type=rdir HTTP/1.1" 200 642
Starting new HTTP connection (1): 172.17.25.5
"GET /v3.0/VCFR1/reference/show?acct=_RDIR&ref=172.17.25.7%3A6004&type=rdir HTTP/1.1" 404 59
('Connection aborted.', BadStatusLine("''",))
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/cliff/app.py", line 374, in run_subcommand
result = cmd.run(parsed_args)
File "/usr/lib/python2.7/dist-packages/cliff/command.py", line 54, in run
self.take_action(parsed_args)
File "/usr/lib/python2.7/dist-packages/oio/cli/admin/directory.py", line 60, in take_action
self.app.client_manager.admin.volume.assign_all_rawx()
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 38, in assign_all_rawx
rdir = self._smart_link_rdir(rawx['addr'], cs, all_rdir)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 68, in _smart_link_rdir
polled = cs.poll('rdir', avoid=avoids, known=known)[0]
File "/usr/lib/python2.7/dist-packages/oio/conscience/client.py", line 47, in poll
data=json.dumps(ibody))
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 51, in _request
return self._direct_request(method, url, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/client.py", line 37, in _direct_request
**kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python2.7/dist-packages/requests/adapters.py", line 426, in send
raise ConnectionError(err, request=request)
ConnectionError: ('Connection aborted.', BadStatusLine("''",))
Exception raised: ('Connection aborted.', BadStatusLine("''",))
|
ConnectionError
|
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
account = self.app.client_manager.get_account()
container = parsed_args.container
data = self.app.client_manager.storage.container_get_properties(account, container)
data_dir = self.app.client_manager.directory.get(account, container)
info = {
"account": data["system"]["sys.account"],
"base_name": data["system"]["sys.name"],
"name": data["system"]["sys.user.name"],
"meta0": list(),
"meta1": list(),
"meta2": list(),
}
for d in data_dir["srv"]:
if d["type"] == "meta2":
info["meta2"].append(d["host"])
for d in data_dir["dir"]:
if d["type"] == "meta0":
info["meta0"].append(d["host"])
if d["type"] == "meta1":
info["meta1"].append(d["host"])
for stype in ["meta0", "meta1", "meta2"]:
info[stype] = ", ".join(info[stype])
return zip(*sorted(info.iteritems()))
|
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
account = self.app.client_manager.get_account()
container = parsed_args.container
data = self.app.client_manager.storage.container_show(account, container)
data_dir = self.app.client_manager.directory.get(account, container)
info = {
"account": data["system"]["sys.account"],
"base_name": data["system"]["sys.name"],
"name": data["system"]["sys.user.name"],
"meta0": list(),
"meta1": list(),
"meta2": list(),
}
for d in data_dir["srv"]:
if d["type"] == "meta2":
info["meta2"].append(d["host"])
for d in data_dir["dir"]:
if d["type"] == "meta0":
info["meta0"].append(d["host"])
if d["type"] == "meta1":
info["meta1"].append(d["host"])
for stype in ["meta0", "meta1", "meta2"]:
info[stype] = ", ".join(info[stype])
return zip(*sorted(info.iteritems()))
|
https://github.com/open-io/oio-sds/issues/800
|
### 0 /home/jfs/public_git/github.com/jfsmig/oio-sds
(oio)jfs@jfs-lenovo 14 # openio container create JFS
+------+---------+
| Name | Created |
+------+---------+
| JFS | True |
+------+---------+
### 0 /home/jfs/public_git/github.com/jfsmig/oio-sds
(oio)jfs@jfs-lenovo 15 # openio --debug -v container locate JFS
Starting new HTTP connection (1): 127.0.0.1
"GET /v3.0/OPENIO/container/show?acct=ACCT&ref=JFS HTTP/1.1" 200 17
Starting new HTTP connection (1): 127.0.0.1
"GET /v3.0/OPENIO/reference/show?acct=ACCT&ref=JFS HTTP/1.1" 200 194
'system'
Traceback (most recent call last):
File "/home/jfs/.local/python/oio/local/lib/python2.7/site-packages/cliff/app.py", line 387, in run_subcommand
result = cmd.run(parsed_args)
File "/home/jfs/.local/python/oio/local/lib/python2.7/site-packages/cliff/display.py", line 100, in run
column_names, data = self.take_action(parsed_args)
File "/home/jfs/public_git/github.com/jfsmig/oio-sds/oio/cli/storage/container.py", line 361, in take_action
info = {'account': data['system']['sys.account'],
KeyError: 'system'
Exception raised: 'system'
|
KeyError
|
def delete_chunk(self, chunk):
resp = None
parsed = urlparse(chunk["id"])
try:
with Timeout(CHUNK_TIMEOUT):
conn = http_connect(parsed.netloc, "DELETE", parsed.path)
resp = conn.getresponse()
resp.chunk = chunk
except (Exception, Timeout) as exc:
self.logger.warn(
'error while deleting chunk %s "%s"', chunk["id"], str(exc.message)
)
return resp
|
def delete_chunk(chunk):
resp = None
p = urlparse(chunk["id"])
try:
with Timeout(CHUNK_TIMEOUT):
conn = http_connect(p.netloc, "DELETE", p.path)
resp = conn.getresponse()
resp.chunk = chunk
except (Exception, Timeout) as e:
self.logger.warn(
'error while deleting chunk %s "%s"', chunk["id"], str(e.message)
)
return resp
|
https://github.com/open-io/oio-sds/issues/695
|
Sep 06 11:30:47.385138 linux-cgzg OIO,NS,event-agent[11419]: 11419 7F6667BF5690 log ERROR handling event 4452 (bury)
Traceback (most recent call last):
File "/home/fvennetier/src/public_git/oio-sds/oio/event/consumer.py", line 209, in handle
self.process_event(job_id, event, beanstalk)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/consumer.py", line 236, in process_event
handler(event, cb)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/filters/base.py", line 18, in __call__
self.process(env, cb)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/filters/content_cleaner.py", line 61, in process
chunk_method = content_headers['chunk-method']
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def process(self, env, cb):
event = Event(env)
if event.event_type == EventTypes.CONTENT_DELETED:
url = event.env.get("url")
chunks = []
content_headers = None
for item in event.data:
if item.get("type") == "chunks":
chunks.append(item)
if item.get("type") == "contents_headers":
content_headers = item
if len(chunks):
if not content_headers:
chunk_method = guess_storage_method(chunks[0]["id"]) + "/"
else:
chunk_method = content_headers["chunk-method"]
handler, storage_method = self._load_handler(chunk_method)
handler(url, chunks, content_headers, storage_method)
return self.app(env, cb)
return self.app(env, cb)
|
def process(self, env, cb):
event = Event(env)
if event.event_type == EventTypes.CONTENT_DELETED:
pile = GreenPile(PARALLEL_CHUNKS_DELETE)
url = event.env.get("url")
chunks = []
content_headers = None
for item in event.data:
if item.get("type") == "chunks":
chunks.append(item)
if item.get("type") == "contents_headers":
content_headers = item
if len(chunks):
def delete_chunk(chunk):
resp = None
p = urlparse(chunk["id"])
try:
with Timeout(CHUNK_TIMEOUT):
conn = http_connect(p.netloc, "DELETE", p.path)
resp = conn.getresponse()
resp.chunk = chunk
except (Exception, Timeout) as e:
self.logger.warn(
'error while deleting chunk %s "%s"',
chunk["id"],
str(e.message),
)
return resp
def delete_chunk_backblaze(chunks, url, storage_method):
meta = {}
meta["container_id"] = url["id"]
chunk_list = []
for chunk in chunks:
chunk["url"] = chunk["id"]
chunk_list.append(chunk)
key_file = self.conf.get("key_file")
backblaze_info = BackblazeUtils.get_credentials(
storage_method, key_file
)
try:
BackblazeDeleteHandler(meta, chunk_list, backblaze_info).delete()
except OioException as e:
self.logger.warn("delete failed: %s" % str(e))
chunk_method = content_headers["chunk-method"]
# don't load storage method other than backblaze
if chunk_method.startswith("backblaze"):
storage_method = STORAGE_METHODS.load(chunk_method)
delete_chunk_backblaze(chunks, url, storage_method)
return self.app(env, cb)
for chunk in chunks:
pile.spawn(delete_chunk, chunk)
resps = [resp for resp in pile if resp]
for resp in resps:
if resp.status != 204:
self.logger.warn(
"failed to delete chunk %s (HTTP %s)",
resp.chunk["id"],
resp.status,
)
return self.app(env, cb)
|
https://github.com/open-io/oio-sds/issues/695
|
Sep 06 11:30:47.385138 linux-cgzg OIO,NS,event-agent[11419]: 11419 7F6667BF5690 log ERROR handling event 4452 (bury)
Traceback (most recent call last):
File "/home/fvennetier/src/public_git/oio-sds/oio/event/consumer.py", line 209, in handle
self.process_event(job_id, event, beanstalk)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/consumer.py", line 236, in process_event
handler(event, cb)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/filters/base.py", line 18, in __call__
self.process(env, cb)
File "/home/fvennetier/src/public_git/oio-sds/oio/event/filters/content_cleaner.py", line 61, in process
chunk_method = content_headers['chunk-method']
TypeError: 'NoneType' object has no attribute '__getitem__'
|
TypeError
|
def register(self):
# Use a boolean so we can easily convert it to a number in conscience
self.service_definition["tags"]["tag.up"] = self.last_status
try:
self.cs.register(self.service["type"], self.service_definition)
except requests.RequestException as rqe:
self.logger.warn(
"Failed to register service %s: %s", self.service_definition["addr"], rqe
)
|
def register(self):
# Use a boolean so we can easily convert it to a number in conscience
self.service_definition["tags"]["tag.up"] = self.last_status
self.cs.register(self.service["type"], self.service_definition)
|
https://github.com/open-io/oio-sds/issues/537
|
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345DA59690 log INFO watcher "meta0|127.0.0.1|6005" stopped
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : Traceback (most recent call last):
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : File "/home/jfs/.local/python/oio/local/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 115, in wait
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : listener.cb(fileno)
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : File "/home/jfs/.local/python/oio/local/lib/python2.7/site-packages/eventlet/greenthread.py", line 214, in main
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : result = function(*args, **kwargs)
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : File "/home/jfs/public_git/github.com/jfsmig/oio-sds/oio/conscience/agent.py", line 70, in start
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : self.watch()
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : File "/home/jfs/public_git/github.com/jfsmig/oio-sds/oio/conscience/agent.py", line 120, in watch
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : raise e
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : ConnectionError: ('Connection aborted.', error(111, 'ECONNREFUSED'))
avril 06 18:06:15 jfs-lenovo OIO,NS,conscience-agent,1[30650]: 30650 7F345EAEAEB0 log ERROR STDERR : Removing descriptor: 7
|
ConnectionError
|
def install_db(
root_login="root",
root_password=None,
db_name=None,
source_sql=None,
admin_password=None,
verbose=True,
force=0,
site_config=None,
reinstall=False,
db_type=None,
db_host=None,
db_port=None,
db_password=None,
no_mariadb_socket=False,
):
from frappe.database import setup_database
if not db_type:
db_type = frappe.conf.db_type or "mariadb"
make_conf(
db_name,
site_config=site_config,
db_password=db_password,
db_type=db_type,
db_host=db_host,
db_port=db_port,
)
frappe.flags.in_install_db = True
frappe.flags.root_login = root_login
frappe.flags.root_password = root_password
setup_database(force, source_sql, verbose, no_mariadb_socket)
frappe.conf.admin_password = frappe.conf.admin_password or admin_password
remove_missing_apps()
frappe.db.create_auth_table()
frappe.db.create_global_search_table()
frappe.db.create_user_settings_table()
frappe.flags.in_install_db = False
|
def install_db(
root_login="root",
root_password=None,
db_name=None,
source_sql=None,
admin_password=None,
verbose=True,
force=0,
site_config=None,
reinstall=False,
db_type=None,
db_host=None,
db_port=None,
db_password=None,
no_mariadb_socket=False,
):
from frappe.database import setup_database
if not db_type:
db_type = frappe.conf.db_type or "mariadb"
make_conf(
db_name, site_config=site_config, db_password=db_password, db_type=db_type
)
frappe.flags.in_install_db = True
frappe.flags.root_login = root_login
frappe.flags.root_password = root_password
setup_database(force, source_sql, verbose, no_mariadb_socket)
frappe.conf.admin_password = frappe.conf.admin_password or admin_password
remove_missing_apps()
frappe.db.create_auth_table()
frappe.db.create_global_search_table()
frappe.db.create_user_settings_table()
frappe.flags.in_install_db = False
|
https://github.com/frappe/frappe/issues/12125
|
$ bench new-site mypgsql.localhost --db-type postgres --db-host postgresql
WARN: bench is installed in editable mode!
This is not the recommended mode of installation for production. Instead, install the package from PyPI with: `pip install frappe-bench`
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 99, in <module>
main()
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 47, in new_site
db_port=db_port, new_site=True)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 87, in _new_site
db_password=db_password, db_type=db_type, db_host=db_host, db_port=db_port, no_mariadb_socket=no_mariadb_socket)
File "/workspace/development/frappe-bench/apps/frappe/frappe/installer.py", line 27, in install_db
setup_database(force, source_sql, verbose, no_mariadb_socket)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/__init__.py", line 13, in setup_database
return frappe.database.postgres.setup_db.setup_database(force, source_sql, verbose)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/setup_db.py", line 6, in setup_database
root_conn.commit()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 737, in commit
self.sql("commit")
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/database.py", line 103, in sql
return super(PostgresDatabase, self).sql(*args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 122, in sql
self.connect()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 75, in connect
self._conn = self.get_connection()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/database.py", line 69, in get_connection
self.host, self.user, self.user, self.password, self.port
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: could not connect to server: Connection refused
Is the server running on host "localhost" (127.0.0.1) and accepting
TCP/IP connections on port 5432?
could not connect to server: Cannot assign requested address
Is the server running on host "localhost" (::1) and accepting
TCP/IP connections on port 5432?
|
psycopg2.OperationalError
|
def make_conf(
db_name=None,
db_password=None,
site_config=None,
db_type=None,
db_host=None,
db_port=None,
):
site = frappe.local.site
make_site_config(
db_name,
db_password,
site_config,
db_type=db_type,
db_host=db_host,
db_port=db_port,
)
sites_path = frappe.local.sites_path
frappe.destroy()
frappe.init(site, sites_path=sites_path)
|
def make_conf(db_name=None, db_password=None, site_config=None, db_type=None):
site = frappe.local.site
make_site_config(db_name, db_password, site_config, db_type=db_type)
sites_path = frappe.local.sites_path
frappe.destroy()
frappe.init(site, sites_path=sites_path)
|
https://github.com/frappe/frappe/issues/12125
|
$ bench new-site mypgsql.localhost --db-type postgres --db-host postgresql
WARN: bench is installed in editable mode!
This is not the recommended mode of installation for production. Instead, install the package from PyPI with: `pip install frappe-bench`
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 99, in <module>
main()
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 47, in new_site
db_port=db_port, new_site=True)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 87, in _new_site
db_password=db_password, db_type=db_type, db_host=db_host, db_port=db_port, no_mariadb_socket=no_mariadb_socket)
File "/workspace/development/frappe-bench/apps/frappe/frappe/installer.py", line 27, in install_db
setup_database(force, source_sql, verbose, no_mariadb_socket)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/__init__.py", line 13, in setup_database
return frappe.database.postgres.setup_db.setup_database(force, source_sql, verbose)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/setup_db.py", line 6, in setup_database
root_conn.commit()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 737, in commit
self.sql("commit")
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/database.py", line 103, in sql
return super(PostgresDatabase, self).sql(*args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 122, in sql
self.connect()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 75, in connect
self._conn = self.get_connection()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/database.py", line 69, in get_connection
self.host, self.user, self.user, self.password, self.port
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: could not connect to server: Connection refused
Is the server running on host "localhost" (127.0.0.1) and accepting
TCP/IP connections on port 5432?
could not connect to server: Cannot assign requested address
Is the server running on host "localhost" (::1) and accepting
TCP/IP connections on port 5432?
|
psycopg2.OperationalError
|
def make_site_config(
db_name=None,
db_password=None,
site_config=None,
db_type=None,
db_host=None,
db_port=None,
):
frappe.create_folder(os.path.join(frappe.local.site_path))
site_file = get_site_config_path()
if not os.path.exists(site_file):
if not (site_config and isinstance(site_config, dict)):
site_config = get_conf_params(db_name, db_password)
if db_type:
site_config["db_type"] = db_type
if db_host:
site_config["db_host"] = db_host
if db_port:
site_config["db_port"] = db_port
with open(site_file, "w") as f:
f.write(json.dumps(site_config, indent=1, sort_keys=True))
|
def make_site_config(db_name=None, db_password=None, site_config=None, db_type=None):
frappe.create_folder(os.path.join(frappe.local.site_path))
site_file = get_site_config_path()
if not os.path.exists(site_file):
if not (site_config and isinstance(site_config, dict)):
site_config = get_conf_params(db_name, db_password)
if db_type:
site_config["db_type"] = db_type
with open(site_file, "w") as f:
f.write(json.dumps(site_config, indent=1, sort_keys=True))
|
https://github.com/frappe/frappe/issues/12125
|
$ bench new-site mypgsql.localhost --db-type postgres --db-host postgresql
WARN: bench is installed in editable mode!
This is not the recommended mode of installation for production. Instead, install the package from PyPI with: `pip install frappe-bench`
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 99, in <module>
main()
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 47, in new_site
db_port=db_port, new_site=True)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 87, in _new_site
db_password=db_password, db_type=db_type, db_host=db_host, db_port=db_port, no_mariadb_socket=no_mariadb_socket)
File "/workspace/development/frappe-bench/apps/frappe/frappe/installer.py", line 27, in install_db
setup_database(force, source_sql, verbose, no_mariadb_socket)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/__init__.py", line 13, in setup_database
return frappe.database.postgres.setup_db.setup_database(force, source_sql, verbose)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/setup_db.py", line 6, in setup_database
root_conn.commit()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 737, in commit
self.sql("commit")
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/database.py", line 103, in sql
return super(PostgresDatabase, self).sql(*args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 122, in sql
self.connect()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 75, in connect
self._conn = self.get_connection()
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/postgres/database.py", line 69, in get_connection
self.host, self.user, self.user, self.password, self.port
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: could not connect to server: Connection refused
Is the server running on host "localhost" (127.0.0.1) and accepting
TCP/IP connections on port 5432?
could not connect to server: Cannot assign requested address
Is the server running on host "localhost" (::1) and accepting
TCP/IP connections on port 5432?
|
psycopg2.OperationalError
|
def check_release_on_github(app: str):
"""
Check the latest release for a given Frappe application hosted on Github.
Args:
app (str): The name of the Frappe application.
Returns:
tuple(Version, str): The semantic version object of the latest release and the
organization name, if the application exists, otherwise None.
"""
from giturlparse import parse
from giturlparse.parser import ParserError
try:
# Check if repo remote is on github
remote_url = subprocess.check_output(
"cd ../apps/{} && git ls-remote --get-url".format(app), shell=True
)
except subprocess.CalledProcessError:
# Passing this since some apps may not have git initialized in them
return
if isinstance(remote_url, bytes):
remote_url = remote_url.decode()
try:
parsed_url = parse(remote_url)
except ParserError:
# Invalid URL
return
if parsed_url.resource != "github.com":
return
owner = parsed_url.owner
repo = parsed_url.name
# Get latest version from GitHub
r = requests.get(f"https://api.github.com/repos/{owner}/{repo}/releases")
if r.ok:
latest_non_beta_release = parse_latest_non_beta_release(r.json())
if latest_non_beta_release:
return Version(latest_non_beta_release), owner
|
def check_release_on_github(app):
# Check if repo remote is on github
from subprocess import CalledProcessError
try:
remote_url = subprocess.check_output(
"cd ../apps/{} && git ls-remote --get-url".format(app), shell=True
).decode()
except CalledProcessError:
# Passing this since some apps may not have git initializaed in them
return None
if isinstance(remote_url, bytes):
remote_url = remote_url.decode()
if "github.com" not in remote_url:
return None
# Get latest version from github
if "https" not in remote_url:
return None
org_name = remote_url.split("/")[3]
r = requests.get(
"https://api.github.com/repos/{}/{}/releases".format(org_name, app)
)
if r.ok:
lastest_non_beta_release = parse_latest_non_beta_release(r.json())
return Version(lastest_non_beta_release), org_name
# In case of an improper response or if there are no releases
return None
|
https://github.com/frappe/frappe/issues/11097
|
{'method_name': 'frappe.utils.change_log.check_for_update', 'log': <function log at 0x7f837e01f730>, 'retry': 0, 'is_async': True, 'user': 'Administrator', 'kwargs': {}, 'job_name': 'frappe.utils.change_log.check_for_update', 'event': 'daily_long', 'method': <function check_for_update at 0x7f837e54cae8>, 'site': 'my.erp.com'}
Traceback (most recent call last):
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/background_jobs.py", line 102, in execute_job
method(**kwargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/change_log.py", line 144, in check_for_update
app_details = check_release_on_github(app)
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/change_log.py", line 207, in check_release_on_github
return Version(lastest_non_beta_release), org_name
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/semantic_version/base.py", line 102, in __init__
raise ValueError("Call either Version('1.2.3') or Version(major=1, ...).")
ValueError: Call either Version('1.2.3') or Version(major=1, ...).
|
ValueError
|
def on_update(self):
"""update all email accounts using this domain"""
for email_account in frappe.get_all("Email Account", filters={"domain": self.name}):
try:
email_account = frappe.get_doc("Email Account", email_account.name)
for attr in [
"email_server",
"use_imap",
"use_ssl",
"use_tls",
"attachment_limit",
"smtp_server",
"smtp_port",
"use_ssl_for_outgoing",
"append_emails_to_sent_folder",
"incoming_port",
]:
email_account.set(attr, self.get(attr, default=0))
email_account.save()
except Exception as e:
frappe.msgprint(
_("Error has occurred in {0}").format(email_account.name),
raise_exception=e.__class__,
)
|
def on_update(self):
"""update all email accounts using this domain"""
for email_account in frappe.get_all("Email Account", filters={"domain": self.name}):
try:
email_account = frappe.get_doc("Email Account", email_account.name)
for attr in [
"email_server",
"use_imap",
"use_ssl",
"use_tls",
"attachment_limit",
"smtp_server",
"smtp_port",
"use_ssl_for_outgoing",
"append_emails_to_sent_folder",
]:
email_account.set(attr, self.get(attr, default=0))
email_account.save()
except Exception as e:
frappe.msgprint(
_("Error has occurred in {0}").format(email_account.name),
raise_exception=e.__class__,
)
|
https://github.com/frappe/frappe/issues/11556
|
Traceback (most recent call last):
File "/home/erpnext/frappe-bench/apps/frappe/frappe/app.py", line 64, in application
response = frappe.api.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/api.py", line 59, in handle
return frappe.handler.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 24, in handle
data = execute_cmd(cmd)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 63, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/__init__.py", line 1055, in call
return fn(*args, **newargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/desk/form/save.py", line 21, in savedocs
doc.save()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 273, in save
return self._save(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 309, in _save
self.run_before_save_methods()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 896, in run_before_save_methods
self.run_method("validate")
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 797, in run_method
out = Document.hook(fn)(self, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1073, in composer
return composed(self, method, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1056, in runner
add_to_return_value(self, fn(self, *args, **kwargs))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 791, in <lambda>
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 68, in validate
self.get_incoming_server()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 168, in get_incoming_server
email_server.connect()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 43, in connect
return self.connect_imap()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 51, in connect_imap
self.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get("pop_timeout"))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 564, in __init__
self._super.__init__(self, *args, **kwargs)
File "/usr/lib/python3.6/imaplib.py", line 1288, in __init__
IMAP4.__init__(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 198, in __init__
self.open(host, port)
File "/usr/lib/python3.6/imaplib.py", line 1301, in open
IMAP4.open(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 299, in open
self.sock = self._create_socket()
File "/usr/lib/python3.6/imaplib.py", line 1293, in _create_socket
server_hostname=self.host)
File "/usr/lib/python3.6/ssl.py", line 407, in wrap_socket
_context=self, _session=session)
File "/usr/lib/python3.6/ssl.py", line 817, in __init__
self.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 1077, in do_handshake
self._sslobj.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)
|
ssl.SSLError
|
def on_update(self):
"""update all email accounts using this domain"""
for email_account in frappe.get_all("Email Account", filters={"domain": self.name}):
try:
email_account = frappe.get_doc("Email Account", email_account.name)
email_account.set("email_server", self.email_server)
email_account.set("use_imap", self.use_imap)
email_account.set("use_ssl", self.use_ssl)
email_account.set("use_tls", self.use_tls)
email_account.set("attachment_limit", self.attachment_limit)
email_account.set("smtp_server", self.smtp_server)
email_account.set("smtp_port", self.smtp_port)
email_account.set("incoming_port", self.incoming_port)
email_account.save()
except Exception as e:
frappe.msgprint(email_account.name)
frappe.throw(e)
return None
|
def on_update(self):
"""update all email accounts using this domain"""
for email_account in frappe.get_all("Email Account", filters={"domain": self.name}):
try:
email_account = frappe.get_doc("Email Account", email_account.name)
email_account.set("email_server", self.email_server)
email_account.set("use_imap", self.use_imap)
email_account.set("use_ssl", self.use_ssl)
email_account.set("use_tls", self.use_tls)
email_account.set("attachment_limit", self.attachment_limit)
email_account.set("smtp_server", self.smtp_server)
email_account.set("smtp_port", self.smtp_port)
email_account.save()
except Exception as e:
frappe.msgprint(email_account.name)
frappe.throw(e)
return None
|
https://github.com/frappe/frappe/issues/11556
|
Traceback (most recent call last):
File "/home/erpnext/frappe-bench/apps/frappe/frappe/app.py", line 64, in application
response = frappe.api.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/api.py", line 59, in handle
return frappe.handler.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 24, in handle
data = execute_cmd(cmd)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 63, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/__init__.py", line 1055, in call
return fn(*args, **newargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/desk/form/save.py", line 21, in savedocs
doc.save()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 273, in save
return self._save(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 309, in _save
self.run_before_save_methods()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 896, in run_before_save_methods
self.run_method("validate")
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 797, in run_method
out = Document.hook(fn)(self, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1073, in composer
return composed(self, method, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1056, in runner
add_to_return_value(self, fn(self, *args, **kwargs))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 791, in <lambda>
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 68, in validate
self.get_incoming_server()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 168, in get_incoming_server
email_server.connect()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 43, in connect
return self.connect_imap()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 51, in connect_imap
self.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get("pop_timeout"))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 564, in __init__
self._super.__init__(self, *args, **kwargs)
File "/usr/lib/python3.6/imaplib.py", line 1288, in __init__
IMAP4.__init__(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 198, in __init__
self.open(host, port)
File "/usr/lib/python3.6/imaplib.py", line 1301, in open
IMAP4.open(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 299, in open
self.sock = self._create_socket()
File "/usr/lib/python3.6/imaplib.py", line 1293, in _create_socket
server_hostname=self.host)
File "/usr/lib/python3.6/ssl.py", line 407, in wrap_socket
_context=self, _session=session)
File "/usr/lib/python3.6/ssl.py", line 817, in __init__
self.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 1077, in do_handshake
self._sslobj.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)
|
ssl.SSLError
|
def application(request):
response = None
try:
rollback = True
init_request(request)
frappe.recorder.record()
frappe.monitor.start()
frappe.rate_limiter.apply()
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
response = frappe.api.handle()
elif frappe.request.path.startswith("/backups"):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith("/private/files/"):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ("GET", "HEAD", "POST"):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, "cookie_manager"):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.rate_limiter.update()
frappe.monitor.stop(response)
frappe.recorder.dump()
frappe.logger("frappe.web", allow_site=frappe.local.site).info(
{
"site": get_site_name(request.host),
"remote_addr": getattr(request, "remote_addr", "NOTFOUND"),
"base_url": getattr(request, "base_url", "NOTFOUND"),
"full_path": getattr(request, "full_path", "NOTFOUND"),
"method": getattr(request, "method", "NOTFOUND"),
"scheme": getattr(request, "scheme", "NOTFOUND"),
"http_status_code": getattr(response, "status_code", "NOTFOUND"),
}
)
if response and hasattr(frappe.local, "rate_limiter"):
response.headers.extend(frappe.local.rate_limiter.headers())
frappe.destroy()
return response
|
def application(request):
response = None
try:
rollback = True
init_request(request)
frappe.recorder.record()
frappe.monitor.start()
frappe.rate_limiter.apply()
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
response = frappe.api.handle()
elif frappe.request.path.startswith("/backups"):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith("/private/files/"):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ("GET", "HEAD", "POST"):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, "cookie_manager"):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.rate_limiter.update()
frappe.monitor.stop(response)
frappe.recorder.dump()
_site = get_site_name(request.host)
frappe.logger("frappe.web", allow_site=_site).info(
{
"site": _site,
"remote_addr": getattr(request, "remote_addr", "NOTFOUND"),
"base_url": getattr(request, "base_url", "NOTFOUND"),
"full_path": getattr(request, "full_path", "NOTFOUND"),
"method": getattr(request, "method", "NOTFOUND"),
"scheme": getattr(request, "scheme", "NOTFOUND"),
"http_status_code": getattr(response, "status_code", "NOTFOUND"),
}
)
if response and hasattr(frappe.local, "rate_limiter"):
response.headers.extend(frappe.local.rate_limiter.headers())
frappe.destroy()
return response
|
https://github.com/frappe/frappe/issues/11181
|
127.0.0.1 - - [03/Aug/2020 10:17:14] "GET / HTTP/1.0" 500 -
10:17:14 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:14] "GET / HTTP/1.0" 500 -
10:17:14 web.1 | Traceback (most recent call last):
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/middlewares.py", line 16, in __call__
10:17:14 web.1 | return super(StaticDataMiddleware, self).__call__(environ, start_response)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/middleware/shared_data.py", line 220, in __call__
10:17:14 web.1 | return self.app(environ, start_response)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/middleware/shared_data.py", line 220, in __call__
10:17:14 web.1 | return self.app(environ, start_response)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 231, in application
10:17:14 web.1 | return ClosingIterator(app(environ, start_response), self.cleanup)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/wrappers/base_request.py", line 237, in application
10:17:14 web.1 | resp = f(*args[:-2] + (request,))
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/app.py", line 103, in application
10:17:14 web.1 | frappe.logger("frappe.web", allow_site=_site).info({
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 1565, in logger
10:17:14 web.1 | return get_logger(module=module, with_more_info=with_more_info, allow_site=allow_site, filter=filter, max_size=max_size, file_count=file_count)
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/logger.py", line 66, in get_logger
10:17:14 web.1 | site_handler = RotatingFileHandler(sitelog_filename, maxBytes=max_size, backupCount=file_count)
10:17:14 web.1 | File "/usr/lib/python3.6/logging/handlers.py", line 150, in __init__
10:17:14 web.1 | BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
10:17:14 web.1 | File "/usr/lib/python3.6/logging/handlers.py", line 57, in __init__
10:17:14 web.1 | logging.FileHandler.__init__(self, filename, mode, encoding, delay)
10:17:14 web.1 | File "/usr/lib/python3.6/logging/__init__.py", line 1032, in __init__
10:17:14 web.1 | StreamHandler.__init__(self, self._open())
10:17:14 web.1 | File "/usr/lib/python3.6/logging/__init__.py", line 1061, in _open
10:17:14 web.1 | return open(self.baseFilename, self.mode, encoding=self.encoding)
10:17:14 web.1 | FileNotFoundError: [Errno 2] No such file or directory: '/home/frappe/frappe-bench/sites/167.71.179.108/logs/frappe.web.log'
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=style.css HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=style.css HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=jquery.js HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=jquery.js HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=debugger.js HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=debugger.js HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=console.png HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=console.png HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=ubuntu.ttf HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=ubuntu.ttf HTTP/1.0" 200 -
|
FileNotFoundError
|
def get_logger(
module=None,
with_more_info=False,
allow_site=True,
filter=None,
max_size=100_000,
file_count=20,
):
"""Application Logger for your given module
Args:
module (str, optional): Name of your logger and consequently your log file. Defaults to None.
with_more_info (bool, optional): Will log the form dict using the SiteContextFilter. Defaults to False.
allow_site ((str, bool), optional): Pass site name to explicitly log under it's logs. If True and unspecified, guesses which site the logs would be saved under. Defaults to True.
filter (function, optional): Add a filter function for your logger. Defaults to None.
max_size (int, optional): Max file size of each log file in bytes. Defaults to 100_000.
file_count (int, optional): Max count of log files to be retained via Log Rotation. Defaults to 20.
Returns:
<class 'logging.Logger'>: Returns a Python logger object with Site and Bench level logging capabilities.
"""
if allow_site is True:
site = getattr(frappe.local, "site", None)
elif allow_site in get_sites():
site = allow_site
else:
site = False
logger_name = "{0}-{1}".format(module, site or "all")
try:
return frappe.loggers[logger_name]
except KeyError:
pass
if not module:
module = "frappe"
with_more_info = True
logfile = module + ".log"
log_filename = os.path.join("..", "logs", logfile)
logger = logging.getLogger(logger_name)
logger.setLevel(frappe.log_level or default_log_level)
logger.propagate = False
formatter = logging.Formatter(
"%(asctime)s %(levelname)s {0} %(message)s".format(module)
)
handler = RotatingFileHandler(
log_filename, maxBytes=max_size, backupCount=file_count
)
handler.setFormatter(formatter)
logger.addHandler(handler)
if site:
sitelog_filename = os.path.join(site, "logs", logfile)
site_handler = RotatingFileHandler(
sitelog_filename, maxBytes=max_size, backupCount=file_count
)
site_handler.setFormatter(formatter)
logger.addHandler(site_handler)
if with_more_info:
handler.addFilter(SiteContextFilter())
if filter:
logger.addFilter(filter)
frappe.loggers[logger_name] = logger
return logger
|
def get_logger(
module=None,
with_more_info=False,
allow_site=True,
filter=None,
max_size=100_000,
file_count=20,
):
"""Application Logger for your given module
Args:
module (str, optional): Name of your logger and consequently your log file. Defaults to None.
with_more_info (bool, optional): Will log the form dict using the SiteContextFilter. Defaults to False.
allow_site ((str, bool), optional): Pass site name to explicitly log under it's logs. If True and unspecified, guesses which site the logs would be saved under. Defaults to True.
filter (function, optional): Add a filter function for your logger. Defaults to None.
max_size (int, optional): Max file size of each log file in bytes. Defaults to 100_000.
file_count (int, optional): Max count of log files to be retained via Log Rotation. Defaults to 20.
Returns:
<class 'logging.Logger'>: Returns a Python logger object with Site and Bench level logging capabilities.
"""
if allow_site is True:
site = getattr(frappe.local, "site", None)
elif allow_site:
site = allow_site
else:
site = False
logger_name = "{0}-{1}".format(module, site or "all")
try:
return frappe.loggers[logger_name]
except KeyError:
pass
if not module:
module = "frappe"
with_more_info = True
logfile = module + ".log"
log_filename = os.path.join("..", "logs", logfile)
logger = logging.getLogger(logger_name)
logger.setLevel(frappe.log_level or default_log_level)
logger.propagate = False
formatter = logging.Formatter(
"%(asctime)s %(levelname)s {0} %(message)s".format(module)
)
handler = RotatingFileHandler(
log_filename, maxBytes=max_size, backupCount=file_count
)
handler.setFormatter(formatter)
logger.addHandler(handler)
if site:
sitelog_filename = os.path.join(site, "logs", logfile)
site_handler = RotatingFileHandler(
sitelog_filename, maxBytes=max_size, backupCount=file_count
)
site_handler.setFormatter(formatter)
logger.addHandler(site_handler)
if with_more_info:
handler.addFilter(SiteContextFilter())
if filter:
logger.addFilter(filter)
frappe.loggers[logger_name] = logger
return logger
|
https://github.com/frappe/frappe/issues/11181
|
127.0.0.1 - - [03/Aug/2020 10:17:14] "GET / HTTP/1.0" 500 -
10:17:14 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:14] "GET / HTTP/1.0" 500 -
10:17:14 web.1 | Traceback (most recent call last):
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/middlewares.py", line 16, in __call__
10:17:14 web.1 | return super(StaticDataMiddleware, self).__call__(environ, start_response)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/middleware/shared_data.py", line 220, in __call__
10:17:14 web.1 | return self.app(environ, start_response)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/middleware/shared_data.py", line 220, in __call__
10:17:14 web.1 | return self.app(environ, start_response)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 231, in application
10:17:14 web.1 | return ClosingIterator(app(environ, start_response), self.cleanup)
10:17:14 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/werkzeug/wrappers/base_request.py", line 237, in application
10:17:14 web.1 | resp = f(*args[:-2] + (request,))
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/app.py", line 103, in application
10:17:14 web.1 | frappe.logger("frappe.web", allow_site=_site).info({
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 1565, in logger
10:17:14 web.1 | return get_logger(module=module, with_more_info=with_more_info, allow_site=allow_site, filter=filter, max_size=max_size, file_count=file_count)
10:17:14 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/logger.py", line 66, in get_logger
10:17:14 web.1 | site_handler = RotatingFileHandler(sitelog_filename, maxBytes=max_size, backupCount=file_count)
10:17:14 web.1 | File "/usr/lib/python3.6/logging/handlers.py", line 150, in __init__
10:17:14 web.1 | BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
10:17:14 web.1 | File "/usr/lib/python3.6/logging/handlers.py", line 57, in __init__
10:17:14 web.1 | logging.FileHandler.__init__(self, filename, mode, encoding, delay)
10:17:14 web.1 | File "/usr/lib/python3.6/logging/__init__.py", line 1032, in __init__
10:17:14 web.1 | StreamHandler.__init__(self, self._open())
10:17:14 web.1 | File "/usr/lib/python3.6/logging/__init__.py", line 1061, in _open
10:17:14 web.1 | return open(self.baseFilename, self.mode, encoding=self.encoding)
10:17:14 web.1 | FileNotFoundError: [Errno 2] No such file or directory: '/home/frappe/frappe-bench/sites/167.71.179.108/logs/frappe.web.log'
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=style.css HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=style.css HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=jquery.js HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=jquery.js HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=debugger.js HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=debugger.js HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=console.png HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=console.png HTTP/1.0" 200 -
10:17:15 web.1 | 127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=ubuntu.ttf HTTP/1.0" 200 -
10:17:15 web.1 | INFO:werkzeug:127.0.0.1 - - [03/Aug/2020 10:17:15] "GET /?__debugger__=yes&cmd=resource&f=ubuntu.ttf HTTP/1.0" 200 -
|
FileNotFoundError
|
def update_user_setting_filters(data, key, user_setting):
timespan_map = {
"1 week": "week",
"1 month": "month",
"3 months": "quarter",
"6 months": "6 months",
"1 year": "year",
}
period_map = {"Previous": "last", "Next": "next"}
if data.get(key):
update = False
if isinstance(data.get(key), dict):
filters = data.get(key).get("filters")
if filters and isinstance(filters, list):
for f in filters:
if f[2] == "Next" or f[2] == "Previous":
update = True
f[3] = period_map[f[2]] + " " + timespan_map[f[3]]
f[2] = "Timespan"
if update:
data[key]["filters"] = filters
update_user_settings(
user_setting["doctype"], json.dumps(data), for_update=True
)
|
def update_user_setting_filters(data, key, user_setting):
timespan_map = {
"1 week": "week",
"1 month": "month",
"3 months": "quarter",
"6 months": "6 months",
"1 year": "year",
}
period_map = {"Previous": "last", "Next": "next"}
if data.get(key):
update = False
if isinstance(data.get(key), dict):
filters = data.get(key).get("filters")
for f in filters:
if f[2] == "Next" or f[2] == "Previous":
update = True
f[3] = period_map[f[2]] + " " + timespan_map[f[3]]
f[2] = "Timespan"
if update:
data[key]["filters"] = filters
update_user_settings(
user_setting["doctype"], json.dumps(data), for_update=True
)
|
https://github.com/frappe/frappe/issues/10560
|
Patching sites...
Migrating bench-manager.local
Updating DocTypes for frappe : [========================================] 100%
Updating DocTypes for bench_manager : [========================================] 100%
Updating Dashboard for frappe
Updating Dashboard for bench_manager
fatal: Needed a single revision
Generating Website Theme Files...
Migrating site1.local
Executing frappe.patches.v13_0.update_date_filters_in_user_settings in site1.local (_1bd3e0294da19198)
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 99, in <module>
main()
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/commands/__init__.py", line 26, in _func
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/commands/site.py", line 281, in migrate
migrate(context.verbose, rebuild_website=rebuild_website, skip_failing=skip_failing)
File "/home/frappe/frappe-bench/apps/frappe/frappe/migrate.py", line 67, in migrate
frappe.modules.patch_handler.run_all(skip_failing)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 41, in run_all
run_patch(patch)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 30, in run_patch
if not run_single(patchmodule = patch):
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 71, in run_single
return execute_patch(patchmodule, method, methodargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 91, in execute_patch
frappe.get_attr(patchmodule.split()[0] + ".execute")()
File "/home/frappe/frappe-bench/apps/frappe/frappe/patches/v13_0/update_date_filters_in_user_settings.py", line 20, in execute
update_user_setting_filters(data, key, setting)
File "/home/frappe/frappe-bench/apps/frappe/frappe/patches/v13_0/update_date_filters_in_user_settings.py", line 43, in update_user_setting_filters
for f in filters:
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def send_email(success, service_name, doctype, email_field, error_status=None):
recipients = get_recipients(doctype, email_field)
if not recipients:
frappe.log_error(
"No Email Recipient found for {0}".format(service_name),
"{0}: Failed to send backup status email".format(service_name),
)
return
if success:
if not frappe.db.get_value(doctype, None, "send_email_for_successful_backup"):
return
subject = "Backup Upload Successful"
message = """
<h3>Backup Uploaded Successfully!</h3>
<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. So relax!</p>""".format(
service_name
)
else:
subject = "[Warning] Backup Upload Failed"
message = """
<h3>Backup Upload Failed!</h3>
<p>Oops, your automated backup to {0} failed.</p>
<p>Error message: {1}</p>
<p>Please contact your system manager for more information.</p>""".format(
service_name, error_status
)
frappe.sendmail(recipients=recipients, subject=subject, message=message)
|
def send_email(success, service_name, doctype, email_field, error_status=None):
recipients = get_recipients(service_name, email_field)
if not recipients:
frappe.log_error(
"No Email Recipient found for {0}".format(service_name),
"{0}: Failed to send backup status email".format(service_name),
)
return
if success:
if not frappe.db.get_value(doctype, None, "send_email_for_successful_backup"):
return
subject = "Backup Upload Successful"
message = """
<h3>Backup Uploaded Successfully!</h3>
<p>Hi there, this is just to inform you that your backup was successfully uploaded to your {0} bucket. So relax!</p>""".format(
service_name
)
else:
subject = "[Warning] Backup Upload Failed"
message = """
<h3>Backup Upload Failed!</h3>
<p>Oops, your automated backup to {0} failed.</p>
<p>Error message: {1}</p>
<p>Please contact your system manager for more information.</p>""".format(
service_name, error_status
)
frappe.sendmail(recipients=recipients, subject=subject, message=message)
|
https://github.com/frappe/frappe/issues/10080
|
[2020-04-23 16:08:24 +0000] [3664] [ERROR] Error handling request /
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/app.py", line 53, in application
init_request(request)
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/app.py", line 108, in init_request
frappe.init(site=site, sites_path=_sites_path)
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/__init__.py", line 150, in init
local.conf = _dict(get_site_config())
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/__init__.py", line 224, in get_site_config
sys.exit(1)
SystemExit: 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 72, in __getattr__
return self.__storage__[self.__ident_func__()][name]
KeyError: 'conf'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 309, in _get_current_object
return getattr(self.__local, self.__name__)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 74, in __getattr__
raise AttributeError(name)
AttributeError: conf
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/gunicorn/workers/sync.py", line 135, in handle
self.handle_request(listener, req, client, addr)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/gunicorn/workers/sync.py", line 176, in handle_request
respiter = self.wsgi(environ, resp.start_response)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 231, in application
return ClosingIterator(app(environ, start_response), self.cleanup)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/wrappers/base_request.py", line 237, in application
resp = f(*args[:-2] + (request,))
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/app.py", line 96, in application
frappe.monitor.stop(response)
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/monitor.py", line 26, in stop
if frappe.conf.monitor and hasattr(frappe.local, "monitor"):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 311, in _get_current_object
raise RuntimeError("no object bound to %s" % self.__name__)
RuntimeError: no object bound to conf
|
KeyError
|
def get_recipients(doctype, email_field):
if not frappe.db:
frappe.connect()
return split_emails(frappe.db.get_value(doctype, None, email_field))
|
def get_recipients(service_name, email_field):
if not frappe.db:
frappe.connect()
return split_emails(frappe.db.get_value(service_name, None, email_field))
|
https://github.com/frappe/frappe/issues/10080
|
[2020-04-23 16:08:24 +0000] [3664] [ERROR] Error handling request /
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/app.py", line 53, in application
init_request(request)
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/app.py", line 108, in init_request
frappe.init(site=site, sites_path=_sites_path)
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/__init__.py", line 150, in init
local.conf = _dict(get_site_config())
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/__init__.py", line 224, in get_site_config
sys.exit(1)
SystemExit: 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 72, in __getattr__
return self.__storage__[self.__ident_func__()][name]
KeyError: 'conf'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 309, in _get_current_object
return getattr(self.__local, self.__name__)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 74, in __getattr__
raise AttributeError(name)
AttributeError: conf
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/gunicorn/workers/sync.py", line 135, in handle
self.handle_request(listener, req, client, addr)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/gunicorn/workers/sync.py", line 176, in handle_request
respiter = self.wsgi(environ, resp.start_response)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 231, in application
return ClosingIterator(app(environ, start_response), self.cleanup)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/wrappers/base_request.py", line 237, in application
resp = f(*args[:-2] + (request,))
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/app.py", line 96, in application
frappe.monitor.stop(response)
File "/home/ubuntu/frappe-bench/apps/frappe/frappe/monitor.py", line 26, in stop
if frappe.conf.monitor and hasattr(frappe.local, "monitor"):
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/home/ubuntu/frappe-bench/env/lib/python3.6/site-packages/werkzeug/local.py", line 311, in _get_current_object
raise RuntimeError("no object bound to %s" % self.__name__)
RuntimeError: no object bound to conf
|
KeyError
|
def execute():
frappe.reload_doc("desk", "doctype", "dashboard_chart")
if not frappe.db.table_exists("Dashboard Chart"):
return
users_with_permission = frappe.get_all(
"Has Role",
fields=["parent"],
filters={
"role": ["in", ["System Manager", "Dashboard Manager"]],
"parenttype": "User",
},
distinct=True,
)
users = [item.parent for item in users_with_permission]
charts = frappe.db.get_all("Dashboard Chart", filters={"owner": ["in", users]})
for chart in charts:
frappe.db.set_value("Dashboard Chart", chart.name, "is_public", 1)
|
def execute():
frappe.reload_doc("desk", "doctype", "dashboard_chart")
if not frappe.db.table_exists("Dashboard Chart"):
return
users_with_permission = frappe.get_all(
"Has Role",
fields=["parent"],
filters={
"role": ["in", ["System Manager", "Dashboard Manager"]],
"parenttype": "User",
},
distinct=True,
as_list=True,
)
users = tuple(
[
item if type(item) == str else item.encode("utf8")
for sublist in users_with_permission
for item in sublist
]
)
frappe.db.sql(
"""
UPDATE
`tabDashboard Chart`
SET
`tabDashboard Chart`.`is_public`=1
WHERE
`tabDashboard Chart`.owner in {users}
""".format(users=users)
)
|
https://github.com/frappe/frappe/issues/10205
|
Executing frappe.patches.v13_0.set_existing_dashboard_charts_as_public in v12up.localhost (_b0f4a3ee8e7b5021)
Syntax error in query:
UPDATE
`tabDashboard Chart`
SET
`tabDashboard Chart`.`is_public`=1
WHERE
`tabDashboard Chart`.owner in ('Administrator',)
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 97, in <module>
main()
File "/workspace/development/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/__init__.py", line 25, in _func
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/commands/site.py", line 252, in migrate
migrate(context.verbose, rebuild_website=rebuild_website, skip_failing=skip_failing)
File "/workspace/development/frappe-bench/apps/frappe/frappe/migrate.py", line 49, in migrate
frappe.modules.patch_handler.run_all(skip_failing)
File "/workspace/development/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 41, in run_all
run_patch(patch)
File "/workspace/development/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 30, in run_patch
if not run_single(patchmodule = patch):
File "/workspace/development/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 71, in run_single
return execute_patch(patchmodule, method, methodargs)
File "/workspace/development/frappe-bench/apps/frappe/frappe/modules/patch_handler.py", line 91, in execute_patch
frappe.get_attr(patchmodule.split()[0] + ".execute")()
File "/workspace/development/frappe-bench/apps/frappe/frappe/patches/v13_0/set_existing_dashboard_charts_as_public.py", line 28, in execute
""".format(users=users)
File "/workspace/development/frappe-bench/apps/frappe/frappe/database/database.py", line 173, in sql
self._cursor.execute(query)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/cursors.py", line 170, in execute
result = self._query(query)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/cursors.py", line 328, in _query
conn.query(q)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/connections.py", line 517, in query
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/connections.py", line 732, in _read_query_result
result.read()
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/connections.py", line 1075, in read
first_packet = self.connection._read_packet()
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/connections.py", line 684, in _read_packet
packet.check_error()
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/protocol.py", line 220, in check_error
err.raise_mysql_exception(self._data)
File "/workspace/development/frappe-bench/env/lib/python3.7/site-packages/pymysql/err.py", line 109, in raise_mysql_exception
raise errorclass(errno, errval)
pymysql.err.ProgrammingError: (1064, "You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 6")
|
pymysql.err.ProgrammingError
|
def get_result(doc, to_date=None):
doc = frappe.parse_json(doc)
fields = []
sql_function_map = {
"Count": "count",
"Sum": "sum",
"Average": "avg",
"Minimum": "min",
"Maximum": "max",
}
function = sql_function_map[doc.function]
if function == "count":
fields = ["{function}(*) as result".format(function=function)]
else:
fields = [
"{function}({based_on}) as result".format(
function=function, based_on=doc.aggregate_function_based_on
)
]
filters = frappe.parse_json(doc.filters_json)
if to_date:
filters.append([doc.document_type, "creation", "<", to_date, False])
res = frappe.db.get_all(doc.document_type, fields=fields, filters=filters)
number = res[0]["result"] if res else 0
return number
|
def get_result(doc, to_date=None):
doc = frappe.parse_json(doc)
fields = []
sql_function_map = {
"Count": "count",
"Sum": "sum",
"Average": "avg",
"Minimum": "min",
"Maximum": "max",
}
function = sql_function_map[doc.function]
if function == "count":
fields = ["{function}(*) as result".format(function=function)]
else:
fields = [
"{function}({based_on}) as result".format(
function=function, based_on=doc.aggregate_function_based_on
)
]
filters = frappe.parse_json(doc.filters_json)
if to_date:
filters.append([doc.document_type, "creation", "<", to_date, False])
res = frappe.db.get_all(doc.document_type, fields=fields, filters=filters)
number = res[0]["result"] if res else 0
frappe.db.set_value("Number Card", doc.name, "previous_result", number)
return number
|
https://github.com/frappe/frappe/issues/10156
|
Traceback (most recent call last):
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/app.py", line 62, in application
19:47:45 web.1 | response = frappe.api.handle()
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/api.py", line 58, in handle
19:47:45 web.1 | return frappe.handler.handle()
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/handler.py", line 24, in handle
19:47:45 web.1 | data = execute_cmd(cmd)
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/handler.py", line 63, in execute_cmd
19:47:45 web.1 | return frappe.call(method, **frappe.form_dict)
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 1074, in call
19:47:45 web.1 | return fn(*args, **newargs)
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/desk/doctype/number_card/number_card.py", line 70, in get_result
19:47:45 web.1 | frappe.db.set_value('Number Card', doc.name, 'previous_result', number)
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/database/database.py", line 653, in set_value
19:47:45 web.1 | values, debug=debug)
19:47:45 web.1 | File "/home/frappe/frappe-bench/apps/frappe/frappe/database/database.py", line 156, in sql
19:47:45 web.1 | self._cursor.execute(query, values)
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/cursors.py", line 170, in execute
19:47:45 web.1 | result = self._query(query)
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/cursors.py", line 328, in _query
19:47:45 web.1 | conn.query(q)
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/connections.py", line 517, in query
19:47:45 web.1 | self._affected_rows = self._read_query_result(unbuffered=unbuffered)
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/connections.py", line 732, in _read_query_result
19:47:45 web.1 | result.read()
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/connections.py", line 1075, in read
19:47:45 web.1 | first_packet = self.connection._read_packet()
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/connections.py", line 684, in _read_packet
19:47:45 web.1 | packet.check_error()
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/protocol.py", line 220, in check_error
19:47:45 web.1 | err.raise_mysql_exception(self._data)
19:47:45 web.1 | File "/home/frappe/frappe-bench/env/lib/python3.6/site-packages/pymysql/err.py", line 109, in raise_mysql_exception
19:47:45 web.1 | raise errorclass(errno, errval)
19:47:45 web.1 | pymysql.err.InternalError: (1054, "Unknown column 'previous_result' in 'field list'")
|
pymysql.err.InternalError
|
def rebuild_for_doctype(doctype):
"""
Rebuild entries of doctype's documents in __global_search on change of
searchable fields
:param doctype: Doctype
"""
if frappe.local.conf.get("disable_global_search"):
return
if frappe.local.conf.get("disable_global_search"):
return
def _get_filters():
filters = frappe._dict({"docstatus": ["!=", 2]})
if meta.has_field("enabled"):
filters.enabled = 1
if meta.has_field("disabled"):
filters.disabled = 0
return filters
meta = frappe.get_meta(doctype)
if cint(meta.issingle) == 1:
return
if cint(meta.istable) == 1:
parent_doctypes = frappe.get_all(
"DocField",
fields="parent",
filters={
"fieldtype": ["in", frappe.model.table_fields],
"options": doctype,
},
)
for p in parent_doctypes:
rebuild_for_doctype(p.parent)
return
# Delete records
delete_global_search_records_for_doctype(doctype)
parent_search_fields = meta.get_global_search_fields()
fieldnames = get_selected_fields(meta, parent_search_fields)
# Get all records from parent doctype table
all_records = frappe.get_all(doctype, fields=fieldnames, filters=_get_filters())
# Children data
all_children, child_search_fields = get_children_data(doctype, meta)
all_contents = []
for doc in all_records:
content = []
for field in parent_search_fields:
value = doc.get(field.fieldname)
if value:
content.append(get_formatted_value(value, field))
# get children data
for child_doctype, records in all_children.get(doc.name, {}).items():
for field in child_search_fields.get(child_doctype):
for r in records:
if r.get(field.fieldname):
content.append(
get_formatted_value(r.get(field.fieldname), field)
)
if content:
# if doctype published in website, push title, route etc.
published = 0
title, route = "", ""
try:
if (
hasattr(get_controller(doctype), "is_website_published")
and meta.allow_guest_to_view
):
d = frappe.get_doc(doctype, doc.name)
published = 1 if d.is_website_published() else 0
title = d.get_title()
route = d.get("route")
except ImportError:
# some doctypes has been deleted via future patch, hence controller does not exists
pass
all_contents.append(
{
"doctype": frappe.db.escape(doctype),
"name": frappe.db.escape(doc.name),
"content": frappe.db.escape(" ||| ".join(content or "")),
"published": published,
"title": frappe.db.escape(title or "")[
: int(frappe.db.VARCHAR_LEN)
],
"route": frappe.db.escape(route or "")[
: int(frappe.db.VARCHAR_LEN)
],
}
)
if all_contents:
insert_values_for_multiple_docs(all_contents)
|
def rebuild_for_doctype(doctype):
"""
Rebuild entries of doctype's documents in __global_search on change of
searchable fields
:param doctype: Doctype
"""
if frappe.local.conf.get("disable_global_search"):
return
if frappe.local.conf.get("disable_global_search"):
return
def _get_filters():
filters = frappe._dict({"docstatus": ["!=", 2]})
if meta.has_field("enabled"):
filters.enabled = 1
if meta.has_field("disabled"):
filters.disabled = 0
return filters
meta = frappe.get_meta(doctype)
if cint(meta.istable) == 1:
parent_doctypes = frappe.get_all(
"DocField",
fields="parent",
filters={
"fieldtype": ["in", frappe.model.table_fields],
"options": doctype,
},
)
for p in parent_doctypes:
rebuild_for_doctype(p.parent)
return
# Delete records
delete_global_search_records_for_doctype(doctype)
parent_search_fields = meta.get_global_search_fields()
fieldnames = get_selected_fields(meta, parent_search_fields)
# Get all records from parent doctype table
all_records = frappe.get_all(doctype, fields=fieldnames, filters=_get_filters())
# Children data
all_children, child_search_fields = get_children_data(doctype, meta)
all_contents = []
for doc in all_records:
content = []
for field in parent_search_fields:
value = doc.get(field.fieldname)
if value:
content.append(get_formatted_value(value, field))
# get children data
for child_doctype, records in all_children.get(doc.name, {}).items():
for field in child_search_fields.get(child_doctype):
for r in records:
if r.get(field.fieldname):
content.append(
get_formatted_value(r.get(field.fieldname), field)
)
if content:
# if doctype published in website, push title, route etc.
published = 0
title, route = "", ""
try:
if (
hasattr(get_controller(doctype), "is_website_published")
and meta.allow_guest_to_view
):
d = frappe.get_doc(doctype, doc.name)
published = 1 if d.is_website_published() else 0
title = d.get_title()
route = d.get("route")
except ImportError:
# some doctypes has been deleted via future patch, hence controller does not exists
pass
all_contents.append(
{
"doctype": frappe.db.escape(doctype),
"name": frappe.db.escape(doc.name),
"content": frappe.db.escape(" ||| ".join(content or "")),
"published": published,
"title": frappe.db.escape(title or "")[
: int(frappe.db.VARCHAR_LEN)
],
"route": frappe.db.escape(route or "")[
: int(frappe.db.VARCHAR_LEN)
],
}
)
if all_contents:
insert_values_for_multiple_docs(all_contents)
|
https://github.com/frappe/frappe/issues/8779
|
Traceback (most recent call last):
File "/usr/lib/python3.5/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/bob/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 97, in <module>
main()
File "/home/bob/frappe-bench/apps/frappe/frappe/utils/bench_helper.py", line 18, in main
click.Group(commands=commands)(prog_name='bench')
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/bob/frappe-bench/env/lib/python3.5/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/home/bob/frappe-bench/apps/frappe/frappe/commands/__init__.py", line 25, in _func
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
File "/home/bob/frappe-bench/apps/frappe/frappe/commands/utils.py", line 677, in rebuild_global_search
rebuild_for_doctype(doctype)
File "/home/bob/frappe-bench/apps/frappe/frappe/utils/global_search.py", line 101, in rebuild_for_doctype
all_records = frappe.get_all(doctype, fields=fieldnames, filters=_get_filters())
File "/home/bob/frappe-bench/apps/frappe/frappe/__init__.py", line 1302, in get_all
return get_list(doctype, *args, **kwargs)
File "/home/bob/frappe-bench/apps/frappe/frappe/__init__.py", line 1275, in get_list
return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
File "/home/bob/frappe-bench/apps/frappe/frappe/model/db_query.py", line 96, in execute
result = self.build_and_run()
File "/home/bob/frappe-bench/apps/frappe/frappe/model/db_query.py", line 110, in build_and_run
args = self.prepare_args()
File "/home/bob/frappe-bench/apps/frappe/frappe/model/db_query.py", line 136, in prepare_args
self.set_optional_columns()
File "/home/bob/frappe-bench/apps/frappe/frappe/model/db_query.py", line 294, in set_optional_columns
columns = get_table_columns(self.doctype)
File "/home/bob/frappe-bench/apps/frappe/frappe/model/meta.py", line 49, in get_table_columns
return frappe.db.get_table_columns(doctype)
File "/home/bob/frappe-bench/apps/frappe/frappe/database/database.py", line 864, in get_table_columns
raise self.TableMissingError('DocType', doctype)
pymysql.err.ProgrammingError: ('DocType', 'Quick Stock Balance')
|
pymysql.err.ProgrammingError
|
def get_transitions(doc, workflow=None):
"""Return list of possible transitions for the given doc"""
doc = frappe.get_doc(frappe.parse_json(doc))
if doc.is_new():
return []
frappe.has_permission(doc.doctype, "read", throw=True)
roles = frappe.get_roles()
if not workflow:
workflow = get_workflow(doc.doctype)
current_state = doc.get(workflow.workflow_state_field)
if not current_state:
frappe.throw(_("Workflow State not set"), WorkflowStateError)
transitions = []
for transition in workflow.transitions:
if transition.state == current_state and transition.allowed in roles:
if transition.condition:
# if condition, evaluate
# access to frappe.db.get_value and frappe.db.get_list
success = frappe.safe_eval(
transition.condition,
dict(
frappe=frappe._dict(
db=frappe._dict(
get_value=frappe.db.get_value,
get_list=frappe.db.get_list,
),
session=frappe.session,
)
),
dict(doc=doc),
)
if not success:
continue
transitions.append(transition.as_dict())
return transitions
|
def get_transitions(doc, workflow=None):
"""Return list of possible transitions for the given doc"""
doc = frappe.get_doc(frappe.parse_json(doc))
if doc.is_new():
return []
frappe.has_permission(doc, "read", throw=True)
roles = frappe.get_roles()
if not workflow:
workflow = get_workflow(doc.doctype)
current_state = doc.get(workflow.workflow_state_field)
if not current_state:
frappe.throw(_("Workflow State not set"), WorkflowStateError)
transitions = []
for transition in workflow.transitions:
if transition.state == current_state and transition.allowed in roles:
if transition.condition:
# if condition, evaluate
# access to frappe.db.get_value and frappe.db.get_list
success = frappe.safe_eval(
transition.condition,
dict(
frappe=frappe._dict(
db=frappe._dict(
get_value=frappe.db.get_value,
get_list=frappe.db.get_list,
),
session=frappe.session,
)
),
dict(doc=doc),
)
if not success:
continue
transitions.append(transition.as_dict())
return transitions
|
https://github.com/frappe/frappe/issues/5448
|
Traceback (most recent call last):
File "/home/frappe/frappe-bench/apps/frappe/frappe/app.py", line 62, in application
response = frappe.handler.handle()
File "/home/frappe/frappe-bench/apps/frappe/frappe/handler.py", line 22, in handle
data = execute_cmd(cmd)
File "/home/frappe/frappe-bench/apps/frappe/frappe/handler.py", line 53, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 942, in call
return fn(*args, **newargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/workflow.py", line 30, in get_transitions
frappe.has_permission(doc, 'read', throw=True)
File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 535, in has_permission
out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
File "/home/frappe/frappe-bench/apps/frappe/frappe/permissions.py", line 41, in has_permission
meta = frappe.get_meta(doctype)
File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 658, in get_meta
return frappe.model.meta.get_meta(doctype, cached=cached)
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/meta.py", line 34, in get_meta
lambda: Meta(doctype))
File "/home/frappe/frappe-bench/apps/frappe/frappe/utils/redis_wrapper.py", line 173, in hget
value = generator()
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/meta.py", line 34, in <lambda>
lambda: Meta(doctype))
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/meta.py", line 71, in __init__
super(Meta, self).__init__(doctype.as_dict())
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/document.py", line 114, in __init__
self.init_valid_columns()
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/base_document.py", line 232, in init_valid_columns
for key in self.get_valid_columns():
File "/home/frappe/frappe-bench/apps/frappe/frappe/model/meta.py", line 129, in get_valid_columns
[df.fieldname for df in self.get("fields") if df.fieldtype in type_map]
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def normalize_launch_params(params: Dict) -> None:
if "env" in params:
params["env"] = {name: str(value) for [name, value] in params["env"].items()}
if "ignoreDefaultArgs" in params:
if params["ignoreDefaultArgs"] is True:
params["ignoreAllDefaultArgs"] = True
del params["ignoreDefaultArgs"]
if "executablePath" in params:
params["executablePath"] = str(Path(params["executablePath"]))
if "downloadsPath" in params:
params["downloadsPath"] = str(Path(params["downloadsPath"]))
|
def normalize_launch_params(params: Dict) -> None:
if "env" in params:
params["env"] = {name: str(value) for [name, value] in params["env"].items()}
if "ignoreDefaultArgs" in params:
if isinstance(params["ignoreDefaultArgs"], bool):
params["ignoreAllDefaultArgs"] = True
del params["ignoreDefaultArgs"]
params["env"] = {name: str(value) for [name, value] in params["env"].items()}
if "executablePath" in params:
params["executablePath"] = str(Path(params["executablePath"]))
if "downloadsPath" in params:
params["downloadsPath"] = str(Path(params["downloadsPath"]))
|
https://github.com/microsoft/playwright-python/issues/189
|
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 68, in launch
return from_channel(await self._channel.send("launch", params))
File "/home/user/.local/lib/python3.8/site-packages/playwright/connection.py", line 39, in send
result = await callback.future
File "/usr/lib/python3.8/asyncio/futures.py", line 260, in __await__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.8/asyncio/tasks.py", line 349, in __wakeup
future.result()
File "/usr/lib/python3.8/asyncio/futures.py", line 178, in result
raise self._exception
playwright.helper.TimeoutError: Timeout 30000ms exceeded.
=========================== logs ===========================
<launching> /home/user/.cache/ms-playwright/chromium-799411/chrome-linux/chrome
<launched> pid=301672
[err] [301699:301699:0914/061714.839670:ERROR:sandbox_linux.cc(374)] InitializeSandbox() called with multiple threads in process gpu-process.
|
playwright.helper.TimeoutError
|
def __init__(self, parent, config, imtypes):
# Settting the GUI size and panels design
displays = (
wx.Display(i) for i in range(wx.Display.GetCount())
) # Gets the number of displays
screenSizes = [
display.GetGeometry().GetSize() for display in displays
] # Gets the size of each display
index = 0 # For display 1.
screenWidth = screenSizes[index][0]
screenHeight = screenSizes[index][1]
self.gui_size = (screenWidth * 0.7, screenHeight * 0.85)
self.imtypes = imtypes # imagetypes to look for in folder e.g. *.png
wx.Frame.__init__(
self,
parent,
id=wx.ID_ANY,
title="DeepLabCut2.0 - Labeling ToolBox",
size=wx.Size(self.gui_size),
pos=wx.DefaultPosition,
style=wx.RESIZE_BORDER | wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL,
)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetStatusText(
"Looking for a folder to start labeling. Click 'Load frames' to begin."
)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyPressed)
self.SetSizeHints(
wx.Size(self.gui_size)
) # This sets the minimum size of the GUI. It can scale now!
###################################################################################################################################################
# Spliting the frame into top and bottom panels. Bottom panels contains the widgets. The top panel is for showing images and plotting!
topSplitter = wx.SplitterWindow(self)
vSplitter = wx.SplitterWindow(topSplitter)
self.image_panel = ImagePanel(vSplitter, config, self.gui_size)
self.choice_panel = ScrollPanel(vSplitter)
vSplitter.SplitVertically(
self.image_panel, self.choice_panel, sashPosition=self.gui_size[0] * 0.8
)
vSplitter.SetSashGravity(1)
self.widget_panel = WidgetPanel(topSplitter)
topSplitter.SplitHorizontally(
vSplitter, self.widget_panel, sashPosition=self.gui_size[1] * 0.83
) # 0.9
topSplitter.SetSashGravity(1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(topSplitter, 1, wx.EXPAND)
self.SetSizer(sizer)
###################################################################################################################################################
# Add Buttons to the WidgetPanel and bind them to their respective functions.
widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)
self.load = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Load frames")
widgetsizer.Add(self.load, 1, wx.ALL, 15)
self.load.Bind(wx.EVT_BUTTON, self.browseDir)
self.prev = wx.Button(self.widget_panel, id=wx.ID_ANY, label="<<Previous")
widgetsizer.Add(self.prev, 1, wx.ALL, 15)
self.prev.Bind(wx.EVT_BUTTON, self.prevImage)
self.prev.Enable(False)
self.next = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Next>>")
widgetsizer.Add(self.next, 1, wx.ALL, 15)
self.next.Bind(wx.EVT_BUTTON, self.nextImage)
self.next.Enable(False)
self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
widgetsizer.Add(self.help, 1, wx.ALL, 15)
self.help.Bind(wx.EVT_BUTTON, self.helpButton)
self.help.Enable(True)
#
self.zoom = wx.ToggleButton(self.widget_panel, label="Zoom")
widgetsizer.Add(self.zoom, 1, wx.ALL, 15)
self.zoom.Bind(wx.EVT_TOGGLEBUTTON, self.zoomButton)
self.widget_panel.SetSizer(widgetsizer)
self.zoom.Enable(False)
self.home = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Home")
widgetsizer.Add(self.home, 1, wx.ALL, 15)
self.home.Bind(wx.EVT_BUTTON, self.homeButton)
self.widget_panel.SetSizer(widgetsizer)
self.home.Enable(False)
self.pan = wx.ToggleButton(self.widget_panel, id=wx.ID_ANY, label="Pan")
widgetsizer.Add(self.pan, 1, wx.ALL, 15)
self.pan.Bind(wx.EVT_TOGGLEBUTTON, self.panButton)
self.widget_panel.SetSizer(widgetsizer)
self.pan.Enable(False)
self.lock = wx.CheckBox(self.widget_panel, id=wx.ID_ANY, label="Lock View")
widgetsizer.Add(self.lock, 1, wx.ALL, 15)
self.lock.Bind(wx.EVT_CHECKBOX, self.lockChecked)
self.widget_panel.SetSizer(widgetsizer)
self.lock.Enable(False)
self.save = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Save")
widgetsizer.Add(self.save, 1, wx.ALL, 15)
self.save.Bind(wx.EVT_BUTTON, self.saveDataSet)
self.save.Enable(False)
widgetsizer.AddStretchSpacer(15)
self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
widgetsizer.Add(self.quit, 1, wx.ALL | wx.ALIGN_RIGHT, 15)
self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
self.widget_panel.SetSizer(widgetsizer)
self.widget_panel.SetSizerAndFit(widgetsizer)
self.widget_panel.Layout()
###############################################################################################################################
# Variables initialization
self.currentDirectory = os.getcwd()
self.index = []
self.iter = []
self.file = 0
self.updatedCoords = []
self.dataFrame = None
self.config_file = config
self.new_labels = False
self.buttonCounter = []
self.bodyparts2plot = []
self.drs = []
self.num = []
self.view_locked = False
# Workaround for MAC - xlim and ylim changed events seem to be triggered too often so need to make sure that the
# xlim and ylim have actually changed before turning zoom off
self.prezoom_xlim = []
self.prezoom_ylim = []
|
def __init__(self, parent, config, imtypes):
# Settting the GUI size and panels design
displays = (
wx.Display(i) for i in range(wx.Display.GetCount())
) # Gets the number of displays
screenSizes = [
display.GetGeometry().GetSize() for display in displays
] # Gets the size of each display
index = 0 # For display 1.
screenWidth = screenSizes[index][0]
screenHeight = screenSizes[index][1]
self.gui_size = (screenWidth * 0.7, screenHeight * 0.85)
self.imtypes = imtypes # imagetypes to look for in folder e.g. *.png
wx.Frame.__init__(
self,
parent,
id=wx.ID_ANY,
title="DeepLabCut2.0 - Labeling ToolBox",
size=wx.Size(self.gui_size),
pos=wx.DefaultPosition,
style=wx.RESIZE_BORDER | wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL,
)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetStatusText(
"Looking for a folder to start labeling. Click 'Load frames' to begin."
)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyPressed)
self.SetSizeHints(
wx.Size(self.gui_size)
) # This sets the minimum size of the GUI. It can scale now!
###################################################################################################################################################
# Spliting the frame into top and bottom panels. Bottom panels contains the widgets. The top panel is for showing images and plotting!
topSplitter = wx.SplitterWindow(self)
vSplitter = wx.SplitterWindow(topSplitter)
self.image_panel = ImagePanel(vSplitter, config, self.gui_size)
self.choice_panel = ScrollPanel(vSplitter)
vSplitter.SplitVertically(
self.image_panel, self.choice_panel, sashPosition=self.gui_size[0] * 0.8
)
vSplitter.SetSashGravity(1)
self.widget_panel = WidgetPanel(topSplitter)
topSplitter.SplitHorizontally(
vSplitter, self.widget_panel, sashPosition=self.gui_size[1] * 0.83
) # 0.9
topSplitter.SetSashGravity(1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(topSplitter, 1, wx.EXPAND)
self.SetSizer(sizer)
###################################################################################################################################################
# Add Buttons to the WidgetPanel and bind them to their respective functions.
widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)
self.load = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Load frames")
widgetsizer.Add(self.load, 1, wx.ALL, 15)
self.load.Bind(wx.EVT_BUTTON, self.browseDir)
self.prev = wx.Button(self.widget_panel, id=wx.ID_ANY, label="<<Previous")
widgetsizer.Add(self.prev, 1, wx.ALL, 15)
self.prev.Bind(wx.EVT_BUTTON, self.prevImage)
self.prev.Enable(False)
self.next = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Next>>")
widgetsizer.Add(self.next, 1, wx.ALL, 15)
self.next.Bind(wx.EVT_BUTTON, self.nextImage)
self.next.Enable(False)
self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
widgetsizer.Add(self.help, 1, wx.ALL, 15)
self.help.Bind(wx.EVT_BUTTON, self.helpButton)
self.help.Enable(True)
#
self.zoom = wx.ToggleButton(self.widget_panel, label="Zoom")
widgetsizer.Add(self.zoom, 1, wx.ALL, 15)
self.zoom.Bind(wx.EVT_TOGGLEBUTTON, self.zoomButton)
self.widget_panel.SetSizer(widgetsizer)
self.zoom.Enable(False)
self.home = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Home")
widgetsizer.Add(self.home, 1, wx.ALL, 15)
self.home.Bind(wx.EVT_BUTTON, self.homeButton)
self.widget_panel.SetSizer(widgetsizer)
self.home.Enable(False)
self.pan = wx.ToggleButton(self.widget_panel, id=wx.ID_ANY, label="Pan")
widgetsizer.Add(self.pan, 1, wx.ALL, 15)
self.pan.Bind(wx.EVT_TOGGLEBUTTON, self.panButton)
self.widget_panel.SetSizer(widgetsizer)
self.pan.Enable(False)
self.lock = wx.CheckBox(self.widget_panel, id=wx.ID_ANY, label="Lock View")
widgetsizer.Add(self.lock, 1, wx.ALL, 15)
self.lock.Bind(wx.EVT_CHECKBOX, self.lockChecked)
self.widget_panel.SetSizer(widgetsizer)
self.lock.Enable(False)
self.save = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Save")
widgetsizer.Add(self.save, 1, wx.ALL, 15)
self.save.Bind(wx.EVT_BUTTON, self.saveDataSet)
self.save.Enable(False)
self.delete = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Delete Frame")
widgetsizer.Add(self.delete, 1, wx.ALL, 15)
self.delete.Bind(wx.EVT_BUTTON, self.deleteImage)
self.delete.Enable(False)
widgetsizer.AddStretchSpacer(15)
self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
widgetsizer.Add(self.quit, 1, wx.ALL | wx.ALIGN_RIGHT, 15)
self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
self.widget_panel.SetSizer(widgetsizer)
self.widget_panel.SetSizerAndFit(widgetsizer)
self.widget_panel.Layout()
###############################################################################################################################
# Variables initialization
self.currentDirectory = os.getcwd()
self.index = []
self.iter = []
self.file = 0
self.updatedCoords = []
self.dataFrame = None
self.config_file = config
self.new_labels = False
self.buttonCounter = []
self.bodyparts2plot = []
self.drs = []
self.num = []
self.view_locked = False
# Workaround for MAC - xlim and ylim changed events seem to be triggered too often so need to make sure that the
# xlim and ylim have actually changed before turning zoom off
self.prezoom_xlim = []
self.prezoom_ylim = []
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def browseDir(self, event):
"""
Show the DirDialog and ask the user to change the directory where machine labels are stored
"""
self.statusbar.SetStatusText("Looking for a folder to start labeling...")
cwd = os.path.join(os.getcwd(), "labeled-data")
dlg = wx.DirDialog(
self,
"Choose the directory where your extracted frames are saved:",
cwd,
style=wx.DD_DEFAULT_STYLE,
)
if dlg.ShowModal() == wx.ID_OK:
self.dir = dlg.GetPath()
self.load.Enable(False)
self.next.Enable(True)
self.save.Enable(True)
else:
dlg.Destroy()
self.Close(True)
dlg.Destroy()
# Enabling the zoom, pan and home buttons
self.zoom.Enable(True)
self.home.Enable(True)
self.pan.Enable(True)
self.lock.Enable(True)
# Reading config file and its variables
self.cfg = auxiliaryfunctions.read_config(self.config_file)
self.scorer = self.cfg["scorer"]
self.bodyparts = self.cfg["bodyparts"]
self.videos = self.cfg["video_sets"].keys()
self.markerSize = self.cfg["dotsize"]
self.alpha = self.cfg["alphavalue"]
self.colormap = plt.get_cmap(self.cfg["colormap"])
self.colormap = self.colormap.reversed()
self.project_path = self.cfg["project_path"]
imlist = []
for imtype in self.imtypes:
imlist.extend(
[
fn
for fn in glob.glob(os.path.join(self.dir, imtype))
if ("labeled.png" not in fn)
]
)
if len(imlist) == 0:
print("No images found!!")
self.index = np.sort(imlist)
self.statusbar.SetStatusText(
"Working on folder: {}".format(os.path.split(str(self.dir))[-1])
)
self.relativeimagenames = [
"labeled" + n.split("labeled")[1] for n in self.index
] # [n.split(self.project_path+'/')[1] for n in self.index]
# Reading the existing dataset,if already present
try:
self.dataFrame = pd.read_hdf(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5"),
"df_with_missing",
)
self.dataFrame.sort_index(inplace=True)
self.prev.Enable(True)
# Finds the first empty row in the dataframe and sets the iteration to that index
for idx, j in enumerate(self.dataFrame.index):
values = self.dataFrame.loc[j, :].values
if np.prod(np.isnan(values)) == 1:
self.iter = idx
break
else:
self.iter = 0
except:
a = np.empty((len(self.index), 2))
a[:] = np.nan
for bodypart in self.bodyparts:
index = pd.MultiIndex.from_product(
[[self.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
frame = pd.DataFrame(a, columns=index, index=self.relativeimagenames)
self.dataFrame = pd.concat([self.dataFrame, frame], axis=1)
self.iter = 0
# Reading the image name
self.img = self.dataFrame.index[self.iter]
img_name = Path(self.img).name
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.bodyparts
)
# Checking for new frames and adding them to the existing dataframe
old_imgs = np.sort(list(self.dataFrame.index))
self.newimages = list(set(self.relativeimagenames) - set(old_imgs))
if self.newimages == []:
pass
else:
print("Found new frames..")
# Create an empty dataframe with all the new images and then merge this to the existing dataframe.
self.df = None
a = np.empty((len(self.newimages), 2))
a[:] = np.nan
for bodypart in self.bodyparts:
index = pd.MultiIndex.from_product(
[[self.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
frame = pd.DataFrame(a, columns=index, index=self.newimages)
self.df = pd.concat([self.df, frame], axis=1)
self.dataFrame = pd.concat([self.dataFrame, self.df], axis=0)
# Sort it by the index values
self.dataFrame.sort_index(inplace=True)
# checks for unique bodyparts
if len(self.bodyparts) != len(set(self.bodyparts)):
print(
"Error - bodyparts must have unique labels! Please choose unique bodyparts in config.yaml file and try again. Quitting for now!"
)
self.Close(True)
# Extracting the list of new labels
oldBodyParts = self.dataFrame.columns.get_level_values(1)
_, idx = np.unique(oldBodyParts, return_index=True)
oldbodyparts2plot = list(oldBodyParts[np.sort(idx)])
self.new_bodyparts = [x for x in self.bodyparts if x not in oldbodyparts2plot]
# Checking if user added a new label
if self.new_bodyparts == []: # i.e. no new label
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
) = self.image_panel.drawplot(
self.img, img_name, self.iter, self.index, self.bodyparts, self.colormap
)
self.axes.callbacks.connect("xlim_changed", self.onZoom)
self.axes.callbacks.connect("ylim_changed", self.onZoom)
(
self.choiceBox,
self.rdb,
self.slider,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.bodyparts, self.file, self.markerSize
)
self.buttonCounter = MainFrame.plot(self, self.img)
self.cidClick = self.canvas.mpl_connect("button_press_event", self.onClick)
self.canvas.mpl_connect("button_release_event", self.onButtonRelease)
else:
dlg = wx.MessageDialog(
None,
"New label found in the config file. Do you want to see all the other labels?",
"New label found",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
self.bodyparts = self.new_bodyparts
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.bodyparts
)
a = np.empty((len(self.index), 2))
a[:] = np.nan
for bodypart in self.new_bodyparts:
index = pd.MultiIndex.from_product(
[[self.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
frame = pd.DataFrame(a, columns=index, index=self.relativeimagenames)
self.dataFrame = pd.concat([self.dataFrame, frame], axis=1)
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
) = self.image_panel.drawplot(
self.img, img_name, self.iter, self.index, self.bodyparts, self.colormap
)
self.axes.callbacks.connect("xlim_changed", self.onZoom)
self.axes.callbacks.connect("ylim_changed", self.onZoom)
(
self.choiceBox,
self.rdb,
self.slider,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.bodyparts, self.file, self.markerSize
)
self.cidClick = self.canvas.mpl_connect("button_press_event", self.onClick)
self.canvas.mpl_connect("button_release_event", self.onButtonRelease)
self.buttonCounter = MainFrame.plot(self, self.img)
self.checkBox.Bind(wx.EVT_CHECKBOX, self.activateSlider)
self.slider.Bind(wx.EVT_SLIDER, self.OnSliderScroll)
|
def browseDir(self, event):
"""
Show the DirDialog and ask the user to change the directory where machine labels are stored
"""
self.statusbar.SetStatusText("Looking for a folder to start labeling...")
cwd = os.path.join(os.getcwd(), "labeled-data")
dlg = wx.DirDialog(
self,
"Choose the directory where your extracted frames are saved:",
cwd,
style=wx.DD_DEFAULT_STYLE,
)
if dlg.ShowModal() == wx.ID_OK:
self.dir = dlg.GetPath()
self.load.Enable(False)
self.next.Enable(True)
self.save.Enable(True)
else:
dlg.Destroy()
self.Close(True)
dlg.Destroy()
# Enabling the zoom, pan and home buttons
self.zoom.Enable(True)
self.home.Enable(True)
self.pan.Enable(True)
self.lock.Enable(True)
self.delete.Enable(True)
# Reading config file and its variables
self.cfg = auxiliaryfunctions.read_config(self.config_file)
self.scorer = self.cfg["scorer"]
self.bodyparts = self.cfg["bodyparts"]
self.videos = self.cfg["video_sets"].keys()
self.markerSize = self.cfg["dotsize"]
self.alpha = self.cfg["alphavalue"]
self.colormap = plt.get_cmap(self.cfg["colormap"])
self.colormap = self.colormap.reversed()
self.project_path = self.cfg["project_path"]
imlist = []
for imtype in self.imtypes:
imlist.extend(
[
fn
for fn in glob.glob(os.path.join(self.dir, imtype))
if ("labeled.png" not in fn)
]
)
if len(imlist) == 0:
print("No images found!!")
self.index = np.sort(imlist)
self.statusbar.SetStatusText(
"Working on folder: {}".format(os.path.split(str(self.dir))[-1])
)
self.relativeimagenames = [
"labeled" + n.split("labeled")[1] for n in self.index
] # [n.split(self.project_path+'/')[1] for n in self.index]
# Reading the existing dataset,if already present
try:
self.dataFrame = pd.read_hdf(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5"),
"df_with_missing",
)
self.dataFrame.sort_index(inplace=True)
self.prev.Enable(True)
# Finds the first empty row in the dataframe and sets the iteration to that index
for idx, j in enumerate(self.dataFrame.index):
values = self.dataFrame.loc[j, :].values
if np.prod(np.isnan(values)) == 1:
self.iter = idx
break
else:
self.iter = 0
except:
a = np.empty((len(self.index), 2))
a[:] = np.nan
for bodypart in self.bodyparts:
index = pd.MultiIndex.from_product(
[[self.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
frame = pd.DataFrame(a, columns=index, index=self.relativeimagenames)
self.dataFrame = pd.concat([self.dataFrame, frame], axis=1)
self.iter = 0
# Reading the image name
self.img = self.dataFrame.index[self.iter]
img_name = Path(self.img).name
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.bodyparts
)
# Checking for new frames and adding them to the existing dataframe
old_imgs = np.sort(list(self.dataFrame.index))
self.newimages = list(set(self.relativeimagenames) - set(old_imgs))
if self.newimages == []:
pass
else:
print("Found new frames..")
# Create an empty dataframe with all the new images and then merge this to the existing dataframe.
self.df = None
a = np.empty((len(self.newimages), 2))
a[:] = np.nan
for bodypart in self.bodyparts:
index = pd.MultiIndex.from_product(
[[self.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
frame = pd.DataFrame(a, columns=index, index=self.newimages)
self.df = pd.concat([self.df, frame], axis=1)
self.dataFrame = pd.concat([self.dataFrame, self.df], axis=0)
# Sort it by the index values
self.dataFrame.sort_index(inplace=True)
# checks for unique bodyparts
if len(self.bodyparts) != len(set(self.bodyparts)):
print(
"Error - bodyparts must have unique labels! Please choose unique bodyparts in config.yaml file and try again. Quitting for now!"
)
self.Close(True)
# Extracting the list of new labels
oldBodyParts = self.dataFrame.columns.get_level_values(1)
_, idx = np.unique(oldBodyParts, return_index=True)
oldbodyparts2plot = list(oldBodyParts[np.sort(idx)])
self.new_bodyparts = [x for x in self.bodyparts if x not in oldbodyparts2plot]
# Checking if user added a new label
if self.new_bodyparts == []: # i.e. no new label
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
) = self.image_panel.drawplot(
self.img, img_name, self.iter, self.index, self.bodyparts, self.colormap
)
self.axes.callbacks.connect("xlim_changed", self.onZoom)
self.axes.callbacks.connect("ylim_changed", self.onZoom)
(
self.choiceBox,
self.rdb,
self.slider,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.bodyparts, self.file, self.markerSize
)
self.buttonCounter = MainFrame.plot(self, self.img)
self.cidClick = self.canvas.mpl_connect("button_press_event", self.onClick)
self.canvas.mpl_connect("button_release_event", self.onButtonRelease)
else:
dlg = wx.MessageDialog(
None,
"New label found in the config file. Do you want to see all the other labels?",
"New label found",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
self.bodyparts = self.new_bodyparts
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.bodyparts
)
a = np.empty((len(self.index), 2))
a[:] = np.nan
for bodypart in self.new_bodyparts:
index = pd.MultiIndex.from_product(
[[self.scorer], [bodypart], ["x", "y"]],
names=["scorer", "bodyparts", "coords"],
)
frame = pd.DataFrame(a, columns=index, index=self.relativeimagenames)
self.dataFrame = pd.concat([self.dataFrame, frame], axis=1)
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
) = self.image_panel.drawplot(
self.img, img_name, self.iter, self.index, self.bodyparts, self.colormap
)
self.axes.callbacks.connect("xlim_changed", self.onZoom)
self.axes.callbacks.connect("ylim_changed", self.onZoom)
(
self.choiceBox,
self.rdb,
self.slider,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.bodyparts, self.file, self.markerSize
)
self.cidClick = self.canvas.mpl_connect("button_press_event", self.onClick)
self.canvas.mpl_connect("button_release_event", self.onButtonRelease)
self.buttonCounter = MainFrame.plot(self, self.img)
self.checkBox.Bind(wx.EVT_CHECKBOX, self.activateSlider)
self.slider.Bind(wx.EVT_SLIDER, self.OnSliderScroll)
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def saveDataSet(self, event):
"""
Saves the final dataframe
"""
self.statusbar.SetStatusText("File saved")
MainFrame.saveEachImage(self)
MainFrame.updateZoomPan(self)
# Windows compatible
self.dataFrame.sort_index(inplace=True)
self.dataFrame = self.dataFrame.reindex(
self.bodyparts,
axis=1,
level=self.dataFrame.columns.names.index("bodyparts"),
)
self.dataFrame.to_csv(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".csv")
)
self.dataFrame.to_hdf(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5"),
"df_with_missing",
format="table",
mode="w",
)
|
def saveDataSet(self, event):
"""
Saves the final dataframe
"""
# Backup previous save
from sys import platform
csv_path = os.path.join(self.dir, "CollectedData_" + self.scorer + ".csv")
hdf_path = os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5")
csv_backup_path = csv_path.replace(".csv", ".csv.backup")
hdf_backup_path = hdf_path.replace(".h5", ".h5.backup")
if platform == "linux" or platform == "linux2":
if os.path.exists(csv_path):
os.rename(csv_path, csv_backup_path)
if os.path.exists(hdf_path):
os.rename(hdf_path, hdf_backup_path)
elif platform == "win32":
if os.path.exists(csv_path):
if os.path.exists(csv_backup_path): # check if backupfile exists already
os.remove(
csv_backup_path
) # requires double action as windows fails to rename file if exists already
os.rename(csv_path, csv_backup_path)
if os.path.exists(hdf_path):
if os.path.exists(hdf_backup_path):
os.remove(hdf_backup_path)
os.rename(hdf_path, hdf_backup_path)
elif platform == "darwin":
try:
if os.path.exists(csv_path):
os.rename(csv_path, csv_backup_path)
if os.path.exists(hdf_path):
os.rename(hdf_path, hdf_backup_path)
except:
print(" Unexpected os.rename behaviour, try win32 approach")
self.statusbar.SetStatusText("File saved")
MainFrame.saveEachImage(self)
MainFrame.updateZoomPan(self)
# Drop Nan data frames
self.dataFrame = self.dataFrame.dropna(how="all")
# Windows compatible
self.dataFrame.sort_index(inplace=True)
self.dataFrame = self.dataFrame.reindex(
self.bodyparts,
axis=1,
level=self.dataFrame.columns.names.index("bodyparts"),
)
self.dataFrame.to_csv(csv_path)
self.dataFrame.to_hdf(hdf_path, "df_with_missing", format="table", mode="w")
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def __init__(self, parent, config):
# Settting the GUI size and panels design
displays = (
wx.Display(i) for i in range(wx.Display.GetCount())
) # Gets the number of displays
screenSizes = [
display.GetGeometry().GetSize() for display in displays
] # Gets the size of each display
index = 0 # For display 1.
screenWidth = screenSizes[index][0]
screenHeight = screenSizes[index][1]
self.gui_size = (screenWidth * 0.7, screenHeight * 0.85)
wx.Frame.__init__(
self,
parent,
id=wx.ID_ANY,
title="DeepLabCut2.0 - Multiple Individuals Labeling ToolBox",
size=wx.Size(self.gui_size),
pos=wx.DefaultPosition,
style=wx.RESIZE_BORDER | wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL,
)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetStatusText(
"Looking for a folder to start labeling. Click 'Load frames' to begin."
)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyPressed)
self.SetSizeHints(
wx.Size(self.gui_size)
) # This sets the minimum size of the GUI. It can scale now!
###################################################################################################################################################
# Spliting the frame into top and bottom panels. Bottom panels contains the widgets. The top panel is for showing images and plotting!
topSplitter = wx.SplitterWindow(self)
vSplitter = wx.SplitterWindow(topSplitter)
self.image_panel = ImagePanel(vSplitter, config, self.gui_size)
self.choice_panel = ScrollPanel(vSplitter)
vSplitter.SplitVertically(
self.image_panel, self.choice_panel, sashPosition=self.gui_size[0] * 0.8
)
vSplitter.SetSashGravity(1)
self.widget_panel = WidgetPanel(topSplitter)
topSplitter.SplitHorizontally(
vSplitter, self.widget_panel, sashPosition=self.gui_size[1] * 0.83
) # 0.9
topSplitter.SetSashGravity(1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(topSplitter, 1, wx.EXPAND)
self.SetSizer(sizer)
###################################################################################################################################################
# Add Buttons to the WidgetPanel and bind them to their respective functions.
widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)
self.load = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Load frames")
widgetsizer.Add(self.load, 1, wx.ALL, 15)
self.load.Bind(wx.EVT_BUTTON, self.browseDir)
self.prev = wx.Button(self.widget_panel, id=wx.ID_ANY, label="<<Previous")
widgetsizer.Add(self.prev, 1, wx.ALL, 15)
self.prev.Bind(wx.EVT_BUTTON, self.prevImage)
self.prev.Enable(False)
self.next = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Next>>")
widgetsizer.Add(self.next, 1, wx.ALL, 15)
self.next.Bind(wx.EVT_BUTTON, self.nextImage)
self.next.Enable(False)
self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
widgetsizer.Add(self.help, 1, wx.ALL, 15)
self.help.Bind(wx.EVT_BUTTON, self.helpButton)
self.help.Enable(True)
self.zoom = wx.ToggleButton(self.widget_panel, label="Zoom")
widgetsizer.Add(self.zoom, 1, wx.ALL, 15)
self.zoom.Bind(wx.EVT_TOGGLEBUTTON, self.zoomButton)
self.widget_panel.SetSizer(widgetsizer)
self.zoom.Enable(False)
self.home = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Home")
widgetsizer.Add(self.home, 1, wx.ALL, 15)
self.home.Bind(wx.EVT_BUTTON, self.homeButton)
self.widget_panel.SetSizer(widgetsizer)
self.home.Enable(False)
self.pan = wx.ToggleButton(self.widget_panel, id=wx.ID_ANY, label="Pan")
widgetsizer.Add(self.pan, 1, wx.ALL, 15)
self.pan.Bind(wx.EVT_TOGGLEBUTTON, self.panButton)
self.widget_panel.SetSizer(widgetsizer)
self.pan.Enable(False)
self.lock = wx.CheckBox(self.widget_panel, id=wx.ID_ANY, label="Lock View")
widgetsizer.Add(self.lock, 1, wx.ALL, 15)
self.lock.Bind(wx.EVT_CHECKBOX, self.lockChecked)
self.widget_panel.SetSizer(widgetsizer)
self.lock.Enable(False)
self.save = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Save")
widgetsizer.Add(self.save, 1, wx.ALL, 15)
self.save.Bind(wx.EVT_BUTTON, self.saveDataSet)
self.save.Enable(False)
widgetsizer.AddStretchSpacer(15)
self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
widgetsizer.Add(self.quit, 1, wx.ALL | wx.ALIGN_RIGHT, 15)
self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
self.widget_panel.SetSizer(widgetsizer)
self.widget_panel.SetSizerAndFit(widgetsizer)
self.widget_panel.Layout()
###############################################################################################################################
# Variables initialization
self.currentDirectory = os.getcwd()
self.index = []
self.iter = []
self.file = 0
self.updatedCoords = []
self.dataFrame = None
self.flag = True
self.config_file = config
self.new_labels = False
self.buttonCounter = []
self.bodyparts2plot = []
self.drs = []
self.num = []
self.are_unique_bodyparts_present = True
self.view_locked = False
# Workaround for MAC - xlim and ylim changed events seem to be triggered too often so need to make sure that the
# xlim and ylim have actually changed before turning zoom off
self.prezoom_xlim = []
self.prezoom_ylim = []
|
def __init__(self, parent, config):
# Settting the GUI size and panels design
displays = (
wx.Display(i) for i in range(wx.Display.GetCount())
) # Gets the number of displays
screenSizes = [
display.GetGeometry().GetSize() for display in displays
] # Gets the size of each display
index = 0 # For display 1.
screenWidth = screenSizes[index][0]
screenHeight = screenSizes[index][1]
self.gui_size = (screenWidth * 0.7, screenHeight * 0.85)
wx.Frame.__init__(
self,
parent,
id=wx.ID_ANY,
title="DeepLabCut2.0 - Multiple Individuals Labeling ToolBox",
size=wx.Size(self.gui_size),
pos=wx.DefaultPosition,
style=wx.RESIZE_BORDER | wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL,
)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetStatusText(
"Looking for a folder to start labeling. Click 'Load frames' to begin."
)
self.Bind(wx.EVT_CHAR_HOOK, self.OnKeyPressed)
self.SetSizeHints(
wx.Size(self.gui_size)
) # This sets the minimum size of the GUI. It can scale now!
###################################################################################################################################################
# Spliting the frame into top and bottom panels. Bottom panels contains the widgets. The top panel is for showing images and plotting!
topSplitter = wx.SplitterWindow(self)
vSplitter = wx.SplitterWindow(topSplitter)
self.image_panel = ImagePanel(vSplitter, config, self.gui_size)
self.choice_panel = ScrollPanel(vSplitter)
vSplitter.SplitVertically(
self.image_panel, self.choice_panel, sashPosition=self.gui_size[0] * 0.8
)
vSplitter.SetSashGravity(1)
self.widget_panel = WidgetPanel(topSplitter)
topSplitter.SplitHorizontally(
vSplitter, self.widget_panel, sashPosition=self.gui_size[1] * 0.83
) # 0.9
topSplitter.SetSashGravity(1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(topSplitter, 1, wx.EXPAND)
self.SetSizer(sizer)
###################################################################################################################################################
# Add Buttons to the WidgetPanel and bind them to their respective functions.
widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)
self.load = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Load frames")
widgetsizer.Add(self.load, 1, wx.ALL, 15)
self.load.Bind(wx.EVT_BUTTON, self.browseDir)
self.prev = wx.Button(self.widget_panel, id=wx.ID_ANY, label="<<Previous")
widgetsizer.Add(self.prev, 1, wx.ALL, 15)
self.prev.Bind(wx.EVT_BUTTON, self.prevImage)
self.prev.Enable(False)
self.next = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Next>>")
widgetsizer.Add(self.next, 1, wx.ALL, 15)
self.next.Bind(wx.EVT_BUTTON, self.nextImage)
self.next.Enable(False)
self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
widgetsizer.Add(self.help, 1, wx.ALL, 15)
self.help.Bind(wx.EVT_BUTTON, self.helpButton)
self.help.Enable(True)
self.zoom = wx.ToggleButton(self.widget_panel, label="Zoom")
widgetsizer.Add(self.zoom, 1, wx.ALL, 15)
self.zoom.Bind(wx.EVT_TOGGLEBUTTON, self.zoomButton)
self.widget_panel.SetSizer(widgetsizer)
self.zoom.Enable(False)
self.home = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Home")
widgetsizer.Add(self.home, 1, wx.ALL, 15)
self.home.Bind(wx.EVT_BUTTON, self.homeButton)
self.widget_panel.SetSizer(widgetsizer)
self.home.Enable(False)
self.pan = wx.ToggleButton(self.widget_panel, id=wx.ID_ANY, label="Pan")
widgetsizer.Add(self.pan, 1, wx.ALL, 15)
self.pan.Bind(wx.EVT_TOGGLEBUTTON, self.panButton)
self.widget_panel.SetSizer(widgetsizer)
self.pan.Enable(False)
self.lock = wx.CheckBox(self.widget_panel, id=wx.ID_ANY, label="Lock View")
widgetsizer.Add(self.lock, 1, wx.ALL, 15)
self.lock.Bind(wx.EVT_CHECKBOX, self.lockChecked)
self.widget_panel.SetSizer(widgetsizer)
self.lock.Enable(False)
self.save = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Save")
widgetsizer.Add(self.save, 1, wx.ALL, 15)
self.save.Bind(wx.EVT_BUTTON, self.saveDataSet)
self.save.Enable(False)
self.delete = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Delete Frame")
widgetsizer.Add(self.delete, 1, wx.ALL, 15)
self.delete.Bind(wx.EVT_BUTTON, self.deleteImage)
self.delete.Enable(False)
widgetsizer.AddStretchSpacer(15)
self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
widgetsizer.Add(self.quit, 1, wx.ALL | wx.ALIGN_RIGHT, 15)
self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
self.widget_panel.SetSizer(widgetsizer)
self.widget_panel.SetSizerAndFit(widgetsizer)
self.widget_panel.Layout()
###############################################################################################################################
# Variables initialization
self.currentDirectory = os.getcwd()
self.index = []
self.iter = []
self.file = 0
self.updatedCoords = []
self.dataFrame = None
self.flag = True
self.config_file = config
self.new_labels = False
self.buttonCounter = []
self.bodyparts2plot = []
self.drs = []
self.num = []
self.are_unique_bodyparts_present = True
self.view_locked = False
# Workaround for MAC - xlim and ylim changed events seem to be triggered too often so need to make sure that the
# xlim and ylim have actually changed before turning zoom off
self.prezoom_xlim = []
self.prezoom_ylim = []
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def browseDir(self, event):
"""
Show the DirDialog and ask the user to change the directory where machine labels are stored
"""
self.statusbar.SetStatusText("Looking for a folder to start labeling...")
cwd = os.path.join(os.getcwd(), "labeled-data")
dlg = wx.DirDialog(
self,
"Choose the directory where your extracted frames are saved:",
cwd,
style=wx.DD_DEFAULT_STYLE,
)
if dlg.ShowModal() == wx.ID_OK:
self.dir = dlg.GetPath()
self.load.Enable(False)
self.next.Enable(True)
self.save.Enable(True)
else:
dlg.Destroy()
self.Close(True)
return
dlg.Destroy()
# Enabling the zoom, pan and home buttons
self.zoom.Enable(True)
self.home.Enable(True)
self.pan.Enable(True)
self.lock.Enable(True)
# Reading config file and its variables
self.cfg = auxiliaryfunctions.read_config(self.config_file)
self.scorer = self.cfg["scorer"]
(
individuals,
uniquebodyparts,
multianimalbodyparts,
) = auxfun_multianimal.extractindividualsandbodyparts(self.cfg)
self.multibodyparts = multianimalbodyparts
# checks for unique bodyparts
if len(self.multibodyparts) != len(set(self.multibodyparts)):
print(
"Error - bodyparts must have unique labels! Please choose unique bodyparts in config.yaml file and try again. Quitting for now!"
)
self.Close(True)
self.uniquebodyparts = uniquebodyparts
self.individual_names = individuals
self.videos = self.cfg["video_sets"].keys()
self.markerSize = self.cfg["dotsize"]
self.edgewidth = self.markerSize // 3
self.alpha = self.cfg["alphavalue"]
self.colormap = plt.get_cmap(self.cfg["colormap"])
self.colormap = self.colormap.reversed()
self.idmap = plt.cm.get_cmap("Set1", len(individuals))
self.project_path = self.cfg["project_path"]
if self.uniquebodyparts == []:
self.are_unique_bodyparts_present = False
self.buttonCounter = {i: [] for i in self.individual_names}
self.index = np.sort(
[
fn
for fn in glob.glob(os.path.join(self.dir, "*.png"))
if ("labeled.png" not in fn)
]
)
self.statusbar.SetStatusText(
"Working on folder: {}".format(os.path.split(str(self.dir))[-1])
)
self.relativeimagenames = [
"labeled" + n.split("labeled")[1] for n in self.index
] # [n.split(self.project_path+'/')[1] for n in self.index]
# Reading the existing dataset,if already present
try:
self.dataFrame = pd.read_hdf(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5"),
"df_with_missing",
)
# Handle data previously labeled on a different platform
sep = "/" if "/" in self.dataFrame.index[0] else "\\"
if sep != os.path.sep:
self.dataFrame.index = self.dataFrame.index.str.replace(sep, os.path.sep)
self.dataFrame.sort_index(inplace=True)
self.prev.Enable(True)
# Finds the first empty row in the dataframe and sets the iteration to that index
self.iter = np.argmax(np.isnan(self.dataFrame.values).all(axis=1))
except FileNotFoundError:
# Create an empty data frame
self.dataFrame = MainFrame.create_dataframe(
self,
self.dataFrame,
self.relativeimagenames,
self.individual_names,
self.uniquebodyparts,
self.multibodyparts,
)
self.iter = 0
# Cache original bodyparts
self._old_multi = (
self.dataFrame.xs(self.individual_names[0], axis=1, level="individuals")
.columns.get_level_values("bodyparts")
.unique()
.to_list()
)
self._old_unique = (
self.dataFrame.loc[
:, self.dataFrame.columns.get_level_values("individuals") == "single"
]
.columns.get_level_values("bodyparts")
.unique()
.to_list()
)
# Reading the image name
self.img = self.index[self.iter]
img_name = Path(self.index[self.iter]).name
# Checking for new frames and adding them to the existing dataframe
old_imgs = np.sort(list(self.dataFrame.index))
self.newimages = list(set(self.relativeimagenames) - set(old_imgs))
if self.newimages:
print("Found new frames..")
# Create an empty dataframe with all the new images and then merge this to the existing dataframe.
self.df = MainFrame.create_dataframe(
self,
None,
self.newimages,
self.individual_names,
self.uniquebodyparts,
self.multibodyparts,
)
self.dataFrame = pd.concat([self.dataFrame, self.df], axis=0)
self.dataFrame.sort_index(inplace=True)
# Rearrange bodypart columns in config order
bodyparts = self.multibodyparts + self.uniquebodyparts
self.dataFrame.reindex(
bodyparts, axis=1, level=self.dataFrame.columns.names.index("bodyparts")
)
# Test whether there are missing frames and superfluous data
if len(old_imgs) > len(self.relativeimagenames):
missing_frames = set(old_imgs).difference(self.relativeimagenames)
self.dataFrame.drop(missing_frames, inplace=True)
# Check whether new labels were added
self.new_multi = [x for x in self.multibodyparts if x not in self._old_multi]
self.new_unique = [x for x in self.uniquebodyparts if x not in self._old_unique]
# Checking if user added a new label
if not any([self.new_multi, self.new_unique]): # i.e. no new labels
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
self.image_axis,
) = self.image_panel.drawplot(
self.img,
img_name,
self.iter,
self.index,
self.multibodyparts,
self.colormap,
keep_view=self.view_locked,
)
else:
# Found new labels in either multiple bodyparts or unique bodyparts
dlg = wx.MessageDialog(
None,
"New label found in the config file. Do you want to see all the other labels?",
"New label found",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
if self.new_multi:
self.multibodyparts = self.new_multi
if self.new_unique:
self.uniquebodyparts = self.new_unique
self.dataFrame = MainFrame.create_dataframe(
self,
self.dataFrame,
self.relativeimagenames,
self.individual_names,
self.new_unique,
self.new_multi,
)
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
self.image_axis,
) = self.image_panel.drawplot(
self.img,
img_name,
self.iter,
self.index,
self.multibodyparts,
self.colormap,
keep_view=self.view_locked,
)
self.axes.callbacks.connect("xlim_changed", self.onZoom)
self.axes.callbacks.connect("ylim_changed", self.onZoom)
if self.individual_names[0] == "single":
(
self.choiceBox,
self.individualrdb,
self.rdb,
self.change_marker_size,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.uniquebodyparts, self.individual_names, self.file, self.markerSize
)
self.image_panel.addcolorbar(
self.img,
self.image_axis,
self.iter,
self.uniquebodyparts,
self.colormap,
)
else:
(
self.choiceBox,
self.individualrdb,
self.rdb,
self.change_marker_size,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.multibodyparts, self.individual_names, self.file, self.markerSize
)
self.image_panel.addcolorbar(
self.img, self.image_axis, self.iter, self.multibodyparts, self.colormap
)
self.individualrdb.Bind(wx.EVT_RADIOBOX, self.select_individual)
# check if single is slected when radio buttons are changed
if self.individualrdb.GetStringSelection() == "single":
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.uniquebodyparts
)
else:
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.multibodyparts
)
self.buttonCounter = MainFrame.plot(self, self.img)
self.cidClick = self.canvas.mpl_connect("button_press_event", self.onClick)
self.checkBox.Bind(wx.EVT_CHECKBOX, self.activateSlider)
self.change_marker_size.Bind(wx.EVT_SLIDER, self.OnSliderScroll)
|
def browseDir(self, event):
"""
Show the DirDialog and ask the user to change the directory where machine labels are stored
"""
self.statusbar.SetStatusText("Looking for a folder to start labeling...")
cwd = os.path.join(os.getcwd(), "labeled-data")
dlg = wx.DirDialog(
self,
"Choose the directory where your extracted frames are saved:",
cwd,
style=wx.DD_DEFAULT_STYLE,
)
if dlg.ShowModal() == wx.ID_OK:
self.dir = dlg.GetPath()
self.load.Enable(False)
self.next.Enable(True)
self.save.Enable(True)
else:
dlg.Destroy()
self.Close(True)
return
dlg.Destroy()
# Enabling the zoom, pan and home buttons
self.zoom.Enable(True)
self.home.Enable(True)
self.pan.Enable(True)
self.lock.Enable(True)
self.delete.Enable(True)
# Reading config file and its variables
self.cfg = auxiliaryfunctions.read_config(self.config_file)
self.scorer = self.cfg["scorer"]
(
individuals,
uniquebodyparts,
multianimalbodyparts,
) = auxfun_multianimal.extractindividualsandbodyparts(self.cfg)
self.multibodyparts = multianimalbodyparts
# checks for unique bodyparts
if len(self.multibodyparts) != len(set(self.multibodyparts)):
print(
"Error - bodyparts must have unique labels! Please choose unique bodyparts in config.yaml file and try again. Quitting for now!"
)
self.Close(True)
self.uniquebodyparts = uniquebodyparts
self.individual_names = individuals
self.videos = self.cfg["video_sets"].keys()
self.markerSize = self.cfg["dotsize"]
self.edgewidth = self.markerSize // 3
self.alpha = self.cfg["alphavalue"]
self.colormap = plt.get_cmap(self.cfg["colormap"])
self.colormap = self.colormap.reversed()
self.idmap = plt.cm.get_cmap("Set1", len(individuals))
self.project_path = self.cfg["project_path"]
if self.uniquebodyparts == []:
self.are_unique_bodyparts_present = False
self.buttonCounter = {i: [] for i in self.individual_names}
self.index = np.sort(
[
fn
for fn in glob.glob(os.path.join(self.dir, "*.png"))
if ("labeled.png" not in fn)
]
)
self.statusbar.SetStatusText(
"Working on folder: {}".format(os.path.split(str(self.dir))[-1])
)
self.relativeimagenames = [
"labeled" + n.split("labeled")[1] for n in self.index
] # [n.split(self.project_path+'/')[1] for n in self.index]
# Reading the existing dataset,if already present
try:
self.dataFrame = pd.read_hdf(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5"),
"df_with_missing",
)
# Handle data previously labeled on a different platform
sep = "/" if "/" in self.dataFrame.index[0] else "\\"
if sep != os.path.sep:
self.dataFrame.index = self.dataFrame.index.str.replace(sep, os.path.sep)
self.dataFrame.sort_index(inplace=True)
self.prev.Enable(True)
# Finds the first empty row in the dataframe and sets the iteration to that index
self.iter = np.argmax(np.isnan(self.dataFrame.values).all(axis=1))
except FileNotFoundError:
# Create an empty data frame
self.dataFrame = MainFrame.create_dataframe(
self,
self.dataFrame,
self.relativeimagenames,
self.individual_names,
self.uniquebodyparts,
self.multibodyparts,
)
self.iter = 0
# Cache original bodyparts
self._old_multi = (
self.dataFrame.xs(self.individual_names[0], axis=1, level="individuals")
.columns.get_level_values("bodyparts")
.unique()
.to_list()
)
self._old_unique = (
self.dataFrame.loc[
:, self.dataFrame.columns.get_level_values("individuals") == "single"
]
.columns.get_level_values("bodyparts")
.unique()
.to_list()
)
# Reading the image name
self.img = self.index[self.iter]
img_name = Path(self.index[self.iter]).name
# Checking for new frames and adding them to the existing dataframe
old_imgs = np.sort(list(self.dataFrame.index))
self.newimages = list(set(self.relativeimagenames) - set(old_imgs))
if self.newimages:
print("Found new frames..")
# Create an empty dataframe with all the new images and then merge this to the existing dataframe.
self.df = MainFrame.create_dataframe(
self,
None,
self.newimages,
self.individual_names,
self.uniquebodyparts,
self.multibodyparts,
)
self.dataFrame = pd.concat([self.dataFrame, self.df], axis=0)
self.dataFrame.sort_index(inplace=True)
# Rearrange bodypart columns in config order
bodyparts = self.multibodyparts + self.uniquebodyparts
self.dataFrame.reindex(
bodyparts, axis=1, level=self.dataFrame.columns.names.index("bodyparts")
)
# Test whether there are missing frames and superfluous data
if len(old_imgs) > len(self.relativeimagenames):
missing_frames = set(old_imgs).difference(self.relativeimagenames)
self.dataFrame.drop(missing_frames, inplace=True)
# Check whether new labels were added
self.new_multi = [x for x in self.multibodyparts if x not in self._old_multi]
self.new_unique = [x for x in self.uniquebodyparts if x not in self._old_unique]
# Checking if user added a new label
if not any([self.new_multi, self.new_unique]): # i.e. no new labels
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
self.image_axis,
) = self.image_panel.drawplot(
self.img,
img_name,
self.iter,
self.index,
self.multibodyparts,
self.colormap,
keep_view=self.view_locked,
)
else:
# Found new labels in either multiple bodyparts or unique bodyparts
dlg = wx.MessageDialog(
None,
"New label found in the config file. Do you want to see all the other labels?",
"New label found",
wx.YES_NO | wx.ICON_WARNING,
)
result = dlg.ShowModal()
if result == wx.ID_NO:
if self.new_multi:
self.multibodyparts = self.new_multi
if self.new_unique:
self.uniquebodyparts = self.new_unique
self.dataFrame = MainFrame.create_dataframe(
self,
self.dataFrame,
self.relativeimagenames,
self.individual_names,
self.new_unique,
self.new_multi,
)
(
self.figure,
self.axes,
self.canvas,
self.toolbar,
self.image_axis,
) = self.image_panel.drawplot(
self.img,
img_name,
self.iter,
self.index,
self.multibodyparts,
self.colormap,
keep_view=self.view_locked,
)
self.axes.callbacks.connect("xlim_changed", self.onZoom)
self.axes.callbacks.connect("ylim_changed", self.onZoom)
if self.individual_names[0] == "single":
(
self.choiceBox,
self.individualrdb,
self.rdb,
self.change_marker_size,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.uniquebodyparts, self.individual_names, self.file, self.markerSize
)
self.image_panel.addcolorbar(
self.img,
self.image_axis,
self.iter,
self.uniquebodyparts,
self.colormap,
)
else:
(
self.choiceBox,
self.individualrdb,
self.rdb,
self.change_marker_size,
self.checkBox,
) = self.choice_panel.addRadioButtons(
self.multibodyparts, self.individual_names, self.file, self.markerSize
)
self.image_panel.addcolorbar(
self.img, self.image_axis, self.iter, self.multibodyparts, self.colormap
)
self.individualrdb.Bind(wx.EVT_RADIOBOX, self.select_individual)
# check if single is slected when radio buttons are changed
if self.individualrdb.GetStringSelection() == "single":
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.uniquebodyparts
)
else:
self.norm, self.colorIndex = self.image_panel.getColorIndices(
self.img, self.multibodyparts
)
self.buttonCounter = MainFrame.plot(self, self.img)
self.cidClick = self.canvas.mpl_connect("button_press_event", self.onClick)
self.checkBox.Bind(wx.EVT_CHECKBOX, self.activateSlider)
self.change_marker_size.Bind(wx.EVT_SLIDER, self.OnSliderScroll)
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def saveDataSet(self, event):
"""
Saves the final dataframe
"""
self.statusbar.SetStatusText("File saved")
MainFrame.saveEachImage(self)
MainFrame.updateZoomPan(self)
# Windows compatible
self.dataFrame.sort_index(inplace=True)
# Discard data associated with bodyparts that are no longer in the config
config_bpts = self.cfg["multianimalbodyparts"] + self.cfg["uniquebodyparts"]
valid = [
bp in config_bpts for bp in self.dataFrame.columns.get_level_values("bodyparts")
]
self.dataFrame = self.dataFrame.loc[:, valid]
# Re-organize the dataframe so the CSV looks consistent with the config
self.dataFrame = self.dataFrame.reindex(
columns=self.individual_names, level="individuals"
).reindex(columns=config_bpts, level="bodyparts")
self.dataFrame.to_csv(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".csv")
)
self.dataFrame.to_hdf(
os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5"),
"df_with_missing",
format="table",
mode="w",
)
|
def saveDataSet(self, event):
"""
Saves the final dataframe
"""
# Backup previous save
from sys import platform
csv_path = os.path.join(self.dir, "CollectedData_" + self.scorer + ".csv")
hdf_path = os.path.join(self.dir, "CollectedData_" + self.scorer + ".h5")
csv_backup_path = csv_path.replace(".csv", ".csv.backup")
hdf_backup_path = hdf_path.replace(".h5", ".h5.backup")
if platform == "linux" or platform == "linux2":
if os.path.exists(csv_path):
os.rename(csv_path, csv_backup_path)
if os.path.exists(hdf_path):
os.rename(hdf_path, hdf_backup_path)
elif platform == "win32":
if os.path.exists(csv_path):
if os.path.exists(csv_backup_path): # check if backupfile exists already
os.remove(
csv_backup_path
) # requires double action as windows fails to rename file if exists already
os.rename(csv_path, csv_backup_path)
if os.path.exists(hdf_path):
if os.path.exists(hdf_backup_path):
os.remove(hdf_backup_path)
os.rename(hdf_path, hdf_backup_path)
elif platform == "darwin":
try:
if os.path.exists(csv_path):
os.rename(csv_path, csv_backup_path)
if os.path.exists(hdf_path):
os.rename(hdf_path, hdf_backup_path)
except:
print(" Unexpected os.rename behaviour, try win32 approach")
self.statusbar.SetStatusText("File saved")
MainFrame.saveEachImage(self)
MainFrame.updateZoomPan(self)
# Windows compatible
self.dataFrame.sort_index(inplace=True)
# Discard data associated with bodyparts that are no longer in the config
config_bpts = self.cfg["multianimalbodyparts"] + self.cfg["uniquebodyparts"]
valid = [
bp in config_bpts for bp in self.dataFrame.columns.get_level_values("bodyparts")
]
self.dataFrame = self.dataFrame.loc[:, valid]
# Re-organize the dataframe so the CSV looks consistent with the config
self.dataFrame = self.dataFrame.reindex(
columns=self.individual_names, level="individuals"
).reindex(columns=config_bpts, level="bodyparts")
self.dataFrame.to_csv(csv_path)
self.dataFrame.to_hdf(hdf_path, "df_with_missing", format="table", mode="w")
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def create_training_dataset(
config,
num_shuffles=1,
Shuffles=None,
windows2linux=False,
userfeedback=False,
trainIndices=None,
testIndices=None,
net_type=None,
augmenter_type=None,
):
"""
Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.\n
Only the videos included in the config file are used to create this dataset.\n
[OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.
Parameter
----------
config : string
Full path of the config.yaml file as a string.
num_shuffles : int, optional
Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.
Shuffles: list of shuffles.
Alternatively the user can also give a list of shuffles (integers!).
windows2linux: bool.
The annotation files contain path formated according to your operating system. If you label on windows
but train & evaluate on a unix system (e.g. ubunt, colab, Mac) set this variable to True to convert the paths.
userfeedback: bool, optional
If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.
trainIndices: list of lists, optional (default=None)
List of one or multiple lists containing train indexes.
A list containing two lists of training indexes will produce two splits.
testIndices: list of lists, optional (default=None)
List of one or multiple lists containing test indexes.
net_type: list
Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0, mobilenet_v2_0.75,
mobilenet_v2_0.5, mobilenet_v2_0.35, efficientnet-b0, efficientnet-b1, efficientnet-b2, efficientnet-b3,
efficientnet-b4, efficientnet-b5, and efficientnet-b6 are supported.
augmenter_type: string
Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.
Example
--------
>>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
Windows:
>>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
--------
"""
import scipy.io as sio
# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
if cfg.get("multianimalproject", False):
from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
create_multianimaltraining_dataset,
)
create_multianimaltraining_dataset(
config, num_shuffles, Shuffles, windows2linux, net_type
)
else:
scorer = cfg["scorer"]
project_path = cfg["project_path"]
# Create path for training sets & store data there
trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
cfg
) # Path concatenation OS platform independent
auxiliaryfunctions.attempttomakefolder(
Path(os.path.join(project_path, str(trainingsetfolder))), recursive=True
)
Data = merge_annotateddatasets(
cfg, Path(os.path.join(project_path, trainingsetfolder)), windows2linux
)
if Data is None:
return
Data = Data[scorer] # extract labeled data
# loading & linking pretrained models
if net_type is None: # loading & linking pretrained models
net_type = cfg.get("default_net_type", "resnet_50")
else:
if (
"resnet" in net_type
or "mobilenet" in net_type
or "efficientnet" in net_type
):
pass
else:
raise ValueError("Invalid network type:", net_type)
if augmenter_type is None:
augmenter_type = cfg.get("default_augmenter", "imgaug")
if augmenter_type is None: # this could be in config.yaml for old projects!
# updating variable if null/None! #backwardscompatability
auxiliaryfunctions.edit_config(config, {"default_augmenter": "imgaug"})
augmenter_type = "imgaug"
else:
if augmenter_type in [
"default",
"scalecrop",
"imgaug",
"tensorpack",
"deterministic",
]:
pass
else:
raise ValueError("Invalid augmenter type:", augmenter_type)
# Loading the encoder (if necessary downloading from TF)
dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
model_path, num_shuffles = auxfun_models.Check4weights(
net_type, Path(dlcparent_path), num_shuffles
)
if Shuffles is None:
Shuffles = range(1, num_shuffles + 1)
else:
Shuffles = [i for i in Shuffles if isinstance(i, int)]
# print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
if trainIndices is None and testIndices is None:
splits = [
(
trainFraction,
shuffle,
SplitTrials(range(len(Data.index)), trainFraction),
)
for trainFraction in cfg["TrainingFraction"]
for shuffle in Shuffles
]
else:
if len(trainIndices) != len(testIndices) != len(Shuffles):
raise ValueError(
"Number of Shuffles and train and test indexes should be equal."
)
splits = []
for shuffle, (train_inds, test_inds) in enumerate(
zip(trainIndices, testIndices)
):
trainFraction = round(
len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)), 2
)
print(
f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
)
splits.append(
(trainFraction, Shuffles[shuffle], (train_inds, test_inds))
)
bodyparts = cfg["bodyparts"]
nbodyparts = len(bodyparts)
for trainFraction, shuffle, (trainIndices, testIndices) in splits:
if len(trainIndices) > 0:
if userfeedback:
trainposeconfigfile, _, _ = training.return_train_network_path(
config,
shuffle=shuffle,
trainingsetindex=cfg["TrainingFraction"].index(trainFraction),
)
if trainposeconfigfile.is_file():
askuser = input(
"The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
)
if (
askuser == "no"
or askuser == "No"
or askuser == "N"
or askuser == "No"
):
raise Exception(
"Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
)
####################################################
# Generating data structure with labeled information & frame metadata (for deep cut)
####################################################
# Make training file!
(
datafilename,
metadatafilename,
) = auxiliaryfunctions.GetDataandMetaDataFilenames(
trainingsetfolder, trainFraction, shuffle, cfg
)
################################################################################
# Saving data file (convert to training file for deeper cut (*.mat))
################################################################################
data, MatlabData = format_training_data(
Data, trainIndices, nbodyparts, project_path
)
sio.savemat(
os.path.join(project_path, datafilename), {"dataset": MatlabData}
)
################################################################################
# Saving metadata (Pickle file)
################################################################################
auxiliaryfunctions.SaveMetadata(
os.path.join(project_path, metadatafilename),
data,
trainIndices,
testIndices,
trainFraction,
)
################################################################################
# Creating file structure for training &
# Test files as well as pose_yaml files (containing training and testing information)
#################################################################################
modelfoldername = auxiliaryfunctions.GetModelFolder(
trainFraction, shuffle, cfg
)
auxiliaryfunctions.attempttomakefolder(
Path(config).parents[0] / modelfoldername, recursive=True
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/train"
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/test"
)
path_train_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"train",
"pose_cfg.yaml",
)
)
path_test_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"test",
"pose_cfg.yaml",
)
)
# str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test' / 'pose_cfg.yaml')
items2change = {
"dataset": datafilename,
"metadataset": metadatafilename,
"num_joints": len(bodyparts),
"all_joints": [[i] for i in range(len(bodyparts))],
"all_joints_names": [str(bpt) for bpt in bodyparts],
"init_weights": model_path,
"project_path": str(cfg["project_path"]),
"net_type": net_type,
"dataset_type": augmenter_type,
}
items2drop = {}
if augmenter_type == "scalecrop":
# these values are dropped as scalecrop
# doesn't have rotation implemented
items2drop = {"rotation": 0, "rotratio": 0.0}
trainingdata = MakeTrain_pose_yaml(
items2change, path_train_config, defaultconfigfile, items2drop
)
keys2save = [
"dataset",
"num_joints",
"all_joints",
"all_joints_names",
"net_type",
"init_weights",
"global_scale",
"location_refinement",
"locref_stdev",
]
MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
print(
"The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
)
return splits
|
def create_training_dataset(
config,
num_shuffles=1,
Shuffles=None,
windows2linux=False,
userfeedback=False,
trainIndices=None,
testIndices=None,
net_type=None,
augmenter_type=None,
):
"""
Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.\n
Only the videos included in the config file are used to create this dataset.\n
[OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.
Parameter
----------
config : string
Full path of the config.yaml file as a string.
num_shuffles : int, optional
Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.
Shuffles: list of shuffles.
Alternatively the user can also give a list of shuffles (integers!).
windows2linux: bool.
The annotation files contain path formated according to your operating system. If you label on windows
but train & evaluate on a unix system (e.g. ubunt, colab, Mac) set this variable to True to convert the paths.
userfeedback: bool, optional
If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.
trainIndices: list of lists, optional (default=None)
List of one or multiple lists containing train indexes.
A list containing two lists of training indexes will produce two splits.
testIndices: list of lists, optional (default=None)
List of one or multiple lists containing test indexes.
net_type: list
Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0, mobilenet_v2_0.75,
mobilenet_v2_0.5, mobilenet_v2_0.35, efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3,
efficientnet_b4, efficientnet_b5, and efficientnet_b6 are supported.
augmenter_type: string
Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.
Example
--------
>>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
Windows:
>>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
--------
"""
import scipy.io as sio
# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
if cfg.get("multianimalproject", False):
from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
create_multianimaltraining_dataset,
)
create_multianimaltraining_dataset(
config, num_shuffles, Shuffles, windows2linux, net_type
)
else:
scorer = cfg["scorer"]
project_path = cfg["project_path"]
# Create path for training sets & store data there
trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
cfg
) # Path concatenation OS platform independent
auxiliaryfunctions.attempttomakefolder(
Path(os.path.join(project_path, str(trainingsetfolder))), recursive=True
)
Data = merge_annotateddatasets(
cfg, Path(os.path.join(project_path, trainingsetfolder)), windows2linux
)
if Data is None:
return
Data = Data[scorer] # extract labeled data
# loading & linking pretrained models
if net_type is None: # loading & linking pretrained models
net_type = cfg.get("default_net_type", "resnet_50")
else:
if (
"resnet" in net_type
or "mobilenet" in net_type
or "efficientnet" in net_type
):
pass
else:
raise ValueError("Invalid network type:", net_type)
if augmenter_type is None:
augmenter_type = cfg.get("default_augmenter", "imgaug")
if augmenter_type is None: # this could be in config.yaml for old projects!
# updating variable if null/None! #backwardscompatability
auxiliaryfunctions.edit_config(config, {"default_augmenter": "imgaug"})
augmenter_type = "imgaug"
else:
if augmenter_type in [
"default",
"scalecrop",
"imgaug",
"tensorpack",
"deterministic",
]:
pass
else:
raise ValueError("Invalid augmenter type:", augmenter_type)
# Loading the encoder (if necessary downloading from TF)
dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
model_path, num_shuffles = auxfun_models.Check4weights(
net_type, Path(dlcparent_path), num_shuffles
)
if Shuffles is None:
Shuffles = range(1, num_shuffles + 1)
else:
Shuffles = [i for i in Shuffles if isinstance(i, int)]
# print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
if trainIndices is None and testIndices is None:
splits = [
(
trainFraction,
shuffle,
SplitTrials(range(len(Data.index)), trainFraction),
)
for trainFraction in cfg["TrainingFraction"]
for shuffle in Shuffles
]
else:
if len(trainIndices) != len(testIndices) != len(Shuffles):
raise ValueError(
"Number of Shuffles and train and test indexes should be equal."
)
splits = []
for shuffle, (train_inds, test_inds) in enumerate(
zip(trainIndices, testIndices)
):
trainFraction = round(
len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)), 2
)
print(
f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
)
splits.append(
(trainFraction, Shuffles[shuffle], (train_inds, test_inds))
)
bodyparts = cfg["bodyparts"]
nbodyparts = len(bodyparts)
for trainFraction, shuffle, (trainIndices, testIndices) in splits:
if len(trainIndices) > 0:
if userfeedback:
trainposeconfigfile, _, _ = training.return_train_network_path(
config,
shuffle=shuffle,
trainingsetindex=cfg["TrainingFraction"].index(trainFraction),
)
if trainposeconfigfile.is_file():
askuser = input(
"The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
)
if (
askuser == "no"
or askuser == "No"
or askuser == "N"
or askuser == "No"
):
raise Exception(
"Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
)
####################################################
# Generating data structure with labeled information & frame metadata (for deep cut)
####################################################
# Make training file!
(
datafilename,
metadatafilename,
) = auxiliaryfunctions.GetDataandMetaDataFilenames(
trainingsetfolder, trainFraction, shuffle, cfg
)
################################################################################
# Saving data file (convert to training file for deeper cut (*.mat))
################################################################################
data, MatlabData = format_training_data(
Data, trainIndices, nbodyparts, project_path
)
sio.savemat(
os.path.join(project_path, datafilename), {"dataset": MatlabData}
)
################################################################################
# Saving metadata (Pickle file)
################################################################################
auxiliaryfunctions.SaveMetadata(
os.path.join(project_path, metadatafilename),
data,
trainIndices,
testIndices,
trainFraction,
)
################################################################################
# Creating file structure for training &
# Test files as well as pose_yaml files (containing training and testing information)
#################################################################################
modelfoldername = auxiliaryfunctions.GetModelFolder(
trainFraction, shuffle, cfg
)
auxiliaryfunctions.attempttomakefolder(
Path(config).parents[0] / modelfoldername, recursive=True
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/train"
)
auxiliaryfunctions.attempttomakefolder(
str(Path(config).parents[0] / modelfoldername) + "/test"
)
path_train_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"train",
"pose_cfg.yaml",
)
)
path_test_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"test",
"pose_cfg.yaml",
)
)
# str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test' / 'pose_cfg.yaml')
items2change = {
"dataset": datafilename,
"metadataset": metadatafilename,
"num_joints": len(bodyparts),
"all_joints": [[i] for i in range(len(bodyparts))],
"all_joints_names": [str(bpt) for bpt in bodyparts],
"init_weights": model_path,
"project_path": str(cfg["project_path"]),
"net_type": net_type,
"dataset_type": augmenter_type,
}
items2drop = {}
if augmenter_type == "scalecrop":
# these values are dropped as scalecrop
# doesn't have rotation implemented
items2drop = {"rotation": 0, "rotratio": 0.0}
trainingdata = MakeTrain_pose_yaml(
items2change, path_train_config, defaultconfigfile, items2drop
)
keys2save = [
"dataset",
"num_joints",
"all_joints",
"all_joints_names",
"net_type",
"init_weights",
"global_scale",
"location_refinement",
"locref_stdev",
]
MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
print(
"The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
)
return splits
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def create_training_model_comparison(
config,
trainindex=0,
num_shuffles=1,
net_types=["resnet_50"],
augmenter_types=["default"],
userfeedback=False,
windows2linux=False,
):
"""
Creates a training dataset with different networks and augmentation types (dataset_loader) so that the shuffles
have same training and testing indices.
Therefore, this function is useful for benchmarking the performance of different network and augmentation types on the same training/testdata.\n
Parameter
----------
config : string
Full path of the config.yaml file as a string.
trainindex: int, optional
Either (in case uniform = True) indexes which element of TrainingFraction in the config file should be used (note it is a list!).
Alternatively (uniform = False) indexes which folder is dropped, i.e. the first if trainindex=0, the second if trainindex =1, etc.
num_shuffles : int, optional
Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.
net_types: list
Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0,mobilenet_v2_0.75, mobilenet_v2_0.5, mobilenet_v2_0.35,
efficientnet-b0, efficientnet-b1, efficientnet-b2, efficientnet-b3, efficientnet-b4,
efficientnet-b5, and efficientnet-b6 are supported.
augmenter_types: list
Type of augmenters. Currently "default", "imgaug", "tensorpack", and "deterministic" are supported.
userfeedback: bool, optional
If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.
windows2linux: bool.
The annotation files contain path formated according to your operating system. If you label on windows
but train & evaluate on a unix system (e.g. ubunt, colab, Mac) set this variable to True to convert the paths.
Example
--------
>>> deeplabcut.create_training_model_comparison('/analysis/project/reaching-task/config.yaml',num_shuffles=1,net_types=['resnet_50','resnet_152'],augmenter_types=['tensorpack','deterministic'])
Windows:
>>> deeplabcut.create_training_model_comparison('C:\\Users\\Ulf\\looming-task\\config.yaml',num_shuffles=1,net_types=['resnet_50','resnet_152'],augmenter_types=['tensorpack','deterministic'])
--------
"""
# read cfg file
cfg = auxiliaryfunctions.read_config(config)
# create log file
log_file_name = os.path.join(cfg["project_path"], "training_model_comparison.log")
logger = logging.getLogger("training_model_comparison")
if not logger.handlers:
logger = logging.getLogger("training_model_comparison")
hdlr = logging.FileHandler(log_file_name)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
else:
pass
largestshuffleindex = get_largestshuffle_index(config)
for shuffle in range(num_shuffles):
trainIndices, testIndices = mergeandsplit(
config, trainindex=trainindex, uniform=True
)
for idx_net, net in enumerate(net_types):
for idx_aug, aug in enumerate(augmenter_types):
get_max_shuffle_idx = (
largestshuffleindex
+ idx_aug
+ idx_net * len(augmenter_types)
+ shuffle * len(augmenter_types) * len(net_types)
)
log_info = str(
"Shuffle index:"
+ str(get_max_shuffle_idx)
+ ", net_type:"
+ net
+ ", augmenter_type:"
+ aug
+ ", trainsetindex:"
+ str(trainindex)
)
create_training_dataset(
config,
Shuffles=[get_max_shuffle_idx],
net_type=net,
trainIndices=[trainIndices],
testIndices=[testIndices],
augmenter_type=aug,
userfeedback=userfeedback,
windows2linux=windows2linux,
)
logger.info(log_info)
|
def create_training_model_comparison(
config,
trainindex=0,
num_shuffles=1,
net_types=["resnet_50"],
augmenter_types=["default"],
userfeedback=False,
windows2linux=False,
):
"""
Creates a training dataset with different networks and augmentation types (dataset_loader) so that the shuffles
have same training and testing indices.
Therefore, this function is useful for benchmarking the performance of different network and augmentation types on the same training/testdata.\n
Parameter
----------
config : string
Full path of the config.yaml file as a string.
trainindex: int, optional
Either (in case uniform = True) indexes which element of TrainingFraction in the config file should be used (note it is a list!).
Alternatively (uniform = False) indexes which folder is dropped, i.e. the first if trainindex=0, the second if trainindex =1, etc.
num_shuffles : int, optional
Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.
net_types: list
Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0,mobilenet_v2_0.75, mobilenet_v2_0.5, mobilenet_v2_0.35,
efficientnet_b0, efficientnet_b1, efficientnet_b2, efficientnet_b3, efficientnet_b4,
efficientnet_b5, and efficientnet_b6 are supported.
augmenter_types: list
Type of augmenters. Currently "default", "imgaug", "tensorpack", and "deterministic" are supported.
userfeedback: bool, optional
If this is set to false, then all requested train/test splits are created (no matter if they already exist). If you
want to assure that previous splits etc. are not overwritten, then set this to True and you will be asked for each split.
windows2linux: bool.
The annotation files contain path formated according to your operating system. If you label on windows
but train & evaluate on a unix system (e.g. ubunt, colab, Mac) set this variable to True to convert the paths.
Example
--------
>>> deeplabcut.create_training_model_comparison('/analysis/project/reaching-task/config.yaml',num_shuffles=1,net_types=['resnet_50','resnet_152'],augmenter_types=['tensorpack','deterministic'])
Windows:
>>> deeplabcut.create_training_model_comparison('C:\\Users\\Ulf\\looming-task\\config.yaml',num_shuffles=1,net_types=['resnet_50','resnet_152'],augmenter_types=['tensorpack','deterministic'])
--------
"""
# read cfg file
cfg = auxiliaryfunctions.read_config(config)
# create log file
log_file_name = os.path.join(cfg["project_path"], "training_model_comparison.log")
logger = logging.getLogger("training_model_comparison")
if not logger.handlers:
logger = logging.getLogger("training_model_comparison")
hdlr = logging.FileHandler(log_file_name)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
else:
pass
largestshuffleindex = get_largestshuffle_index(config)
for shuffle in range(num_shuffles):
trainIndices, testIndices = mergeandsplit(
config, trainindex=trainindex, uniform=True
)
for idx_net, net in enumerate(net_types):
for idx_aug, aug in enumerate(augmenter_types):
get_max_shuffle_idx = (
largestshuffleindex
+ idx_aug
+ idx_net * len(augmenter_types)
+ shuffle * len(augmenter_types) * len(net_types)
)
log_info = str(
"Shuffle index:"
+ str(get_max_shuffle_idx)
+ ", net_type:"
+ net
+ ", augmenter_type:"
+ aug
+ ", trainsetindex:"
+ str(trainindex)
)
create_training_dataset(
config,
Shuffles=[get_max_shuffle_idx],
net_type=net,
trainIndices=[trainIndices],
testIndices=[testIndices],
augmenter_type=aug,
userfeedback=userfeedback,
windows2linux=windows2linux,
)
logger.info(log_info)
|
https://github.com/DeepLabCut/DeepLabCut/issues/1110
|
pythonw -m deeplabcut
Starting GUI...
Found new frames..
Traceback (most recent call last):
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 767, in nextImage
self.updatedCoords = MainFrame.getLabels(self, self.iter)
File "/Users/brandonjackson/opt/miniconda3/envs/dlc/lib/python3.7/site-packages/deeplabcut/generate_training_dataset/labeling_toolbox.py", line 845, in getLabels
self.dataFrame[self.scorer][bp]["x"].values[self.iter],
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def __init__(self, **kwds):
GLGraphicsItem.__init__(self)
glopts = kwds.pop("glOptions", "additive")
self.setGLOptions(glopts)
self.pos = None
self.size = 10
self.color = [1.0, 1.0, 1.0, 0.5]
self.pxMode = True
# self.vbo = {} ## VBO does not appear to improve performance very much.
self.setData(**kwds)
self.shader = None
|
def __init__(self, **kwds):
GLGraphicsItem.__init__(self)
glopts = kwds.pop("glOptions", "additive")
self.setGLOptions(glopts)
self.pos = []
self.size = 10
self.color = [1.0, 1.0, 1.0, 0.5]
self.pxMode = True
# self.vbo = {} ## VBO does not appear to improve performance very much.
self.setData(**kwds)
self.shader = None
|
https://github.com/pyqtgraph/pyqtgraph/issues/1347
|
|==============================>>
| Traceback (most recent call last):
| File "filename.py", line 358, in <module>
| sys.exit(app.exec_())
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 193, in paintGL
| self.drawItemTree(useItemNames=useItemNames)
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 233, in drawItemTree
| self.drawItemTree(i, useItemNames=useItemNames)
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 214, in drawItemTree
| debug.printExc()
| --- exception caught here ---
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 211, in drawItemTree
| i.paint()
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\items\GLScatterPlotItem.py", line 152, in paint
| glDrawArrays(GL_POINTS, 0, int(pos.size / pos.shape[-1]))
| AttributeError: 'list' object has no attribute 'size'
|==============================<<
Error while drawing item <pyqtgraph.opengl.items.GLScatterPlotItem.GLScatterPlotItem(0xf20aa30) at 0x101A63A8>.
|
AttributeError
|
def paint(self):
if self.pos is None:
return
self.setupGLState()
glEnable(GL_POINT_SPRITE)
glActiveTexture(GL_TEXTURE0)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.pointTexture)
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)
# glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) ## use texture color exactly
# glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE ) ## texture modulates current color
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glEnable(GL_PROGRAM_POINT_SIZE)
with self.shader:
# glUniform1i(self.shader.uniform('texture'), 0) ## inform the shader which texture to use
glEnableClientState(GL_VERTEX_ARRAY)
try:
pos = self.pos
# if pos.ndim > 2:
# pos = pos.reshape((-1, pos.shape[-1]))
glVertexPointerf(pos)
if isinstance(self.color, np.ndarray):
glEnableClientState(GL_COLOR_ARRAY)
glColorPointerf(self.color)
else:
if isinstance(self.color, QtGui.QColor):
glColor4f(*fn.glColor(self.color))
else:
glColor4f(*self.color)
if not self.pxMode or isinstance(self.size, np.ndarray):
glEnableClientState(GL_NORMAL_ARRAY)
norm = np.empty(pos.shape)
if self.pxMode:
norm[..., 0] = self.size
else:
gpos = self.mapToView(pos.transpose()).transpose()
pxSize = self.view().pixelSize(gpos)
norm[..., 0] = self.size / pxSize
glNormalPointerf(norm)
else:
glNormal3f(
self.size, 0, 0
) ## vertex shader uses norm.x to determine point size
# glPointSize(self.size)
glDrawArrays(GL_POINTS, 0, int(pos.size / pos.shape[-1]))
finally:
glDisableClientState(GL_NORMAL_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
# posVBO.unbind()
##fixes #145
glDisable(GL_TEXTURE_2D)
|
def paint(self):
self.setupGLState()
glEnable(GL_POINT_SPRITE)
glActiveTexture(GL_TEXTURE0)
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.pointTexture)
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)
# glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) ## use texture color exactly
# glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE ) ## texture modulates current color
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glEnable(GL_PROGRAM_POINT_SIZE)
with self.shader:
# glUniform1i(self.shader.uniform('texture'), 0) ## inform the shader which texture to use
glEnableClientState(GL_VERTEX_ARRAY)
try:
pos = self.pos
# if pos.ndim > 2:
# pos = pos.reshape((-1, pos.shape[-1]))
glVertexPointerf(pos)
if isinstance(self.color, np.ndarray):
glEnableClientState(GL_COLOR_ARRAY)
glColorPointerf(self.color)
else:
if isinstance(self.color, QtGui.QColor):
glColor4f(*fn.glColor(self.color))
else:
glColor4f(*self.color)
if not self.pxMode or isinstance(self.size, np.ndarray):
glEnableClientState(GL_NORMAL_ARRAY)
norm = np.empty(pos.shape)
if self.pxMode:
norm[..., 0] = self.size
else:
gpos = self.mapToView(pos.transpose()).transpose()
pxSize = self.view().pixelSize(gpos)
norm[..., 0] = self.size / pxSize
glNormalPointerf(norm)
else:
glNormal3f(
self.size, 0, 0
) ## vertex shader uses norm.x to determine point size
# glPointSize(self.size)
glDrawArrays(GL_POINTS, 0, int(pos.size / pos.shape[-1]))
finally:
glDisableClientState(GL_NORMAL_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
# posVBO.unbind()
##fixes #145
glDisable(GL_TEXTURE_2D)
|
https://github.com/pyqtgraph/pyqtgraph/issues/1347
|
|==============================>>
| Traceback (most recent call last):
| File "filename.py", line 358, in <module>
| sys.exit(app.exec_())
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 193, in paintGL
| self.drawItemTree(useItemNames=useItemNames)
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 233, in drawItemTree
| self.drawItemTree(i, useItemNames=useItemNames)
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 214, in drawItemTree
| debug.printExc()
| --- exception caught here ---
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 211, in drawItemTree
| i.paint()
| File "...\AppData\Roaming\Python\Python38\site-packages\pyqtgraph\opengl\items\GLScatterPlotItem.py", line 152, in paint
| glDrawArrays(GL_POINTS, 0, int(pos.size / pos.shape[-1]))
| AttributeError: 'list' object has no attribute 'size'
|==============================<<
Error while drawing item <pyqtgraph.opengl.items.GLScatterPlotItem.GLScatterPlotItem(0xf20aa30) at 0x101A63A8>.
|
AttributeError
|
def keyPressEvent(self, ev):
if not self.hasTimeAxis():
super().keyPressEvent(ev)
return
if ev.key() == QtCore.Qt.Key_Space:
if self.playRate == 0:
self.play()
else:
self.play(0)
ev.accept()
elif ev.key() == QtCore.Qt.Key_Home:
self.setCurrentIndex(0)
self.play(0)
ev.accept()
elif ev.key() == QtCore.Qt.Key_End:
self.setCurrentIndex(self.getProcessedImage().shape[0] - 1)
self.play(0)
ev.accept()
elif ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
self.keysPressed[ev.key()] = 1
self.evalKeyState()
else:
super().keyPressEvent(ev)
|
def keyPressEvent(self, ev):
# print ev.key()
if ev.key() == QtCore.Qt.Key_Space:
if self.playRate == 0:
self.play()
else:
self.play(0)
ev.accept()
elif ev.key() == QtCore.Qt.Key_Home:
self.setCurrentIndex(0)
self.play(0)
ev.accept()
elif ev.key() == QtCore.Qt.Key_End:
self.setCurrentIndex(self.getProcessedImage().shape[0] - 1)
self.play(0)
ev.accept()
elif ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
self.keysPressed[ev.key()] = 1
self.evalKeyState()
else:
super().keyPressEvent(ev)
|
https://github.com/pyqtgraph/pyqtgraph/issues/1545
|
Traceback (most recent call last):
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 452, in keyPressEvent
self.evalKeyState()
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 476, in evalKeyState
self.jumpFrames(1)
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 516, in jumpFrames
if self.axes['t'] is not None:
KeyError: 't'
|
KeyError
|
def keyReleaseEvent(self, ev):
if not self.hasTimeAxis():
super().keyReleaseEvent(ev)
return
if ev.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Home, QtCore.Qt.Key_End]:
ev.accept()
elif ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
try:
del self.keysPressed[ev.key()]
except:
self.keysPressed = {}
self.evalKeyState()
else:
super().keyReleaseEvent(ev)
|
def keyReleaseEvent(self, ev):
if ev.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Home, QtCore.Qt.Key_End]:
ev.accept()
elif ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
try:
del self.keysPressed[ev.key()]
except:
self.keysPressed = {}
self.evalKeyState()
else:
super().keyReleaseEvent(ev)
|
https://github.com/pyqtgraph/pyqtgraph/issues/1545
|
Traceback (most recent call last):
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 452, in keyPressEvent
self.evalKeyState()
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 476, in evalKeyState
self.jumpFrames(1)
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 516, in jumpFrames
if self.axes['t'] is not None:
KeyError: 't'
|
KeyError
|
def timeIndex(self, slider):
## Return the time and frame index indicated by a slider
if not self.hasTimeAxis():
return (0, 0)
t = slider.value()
xv = self.tVals
if xv is None:
ind = int(t)
else:
if len(xv) < 2:
return (0, 0)
totTime = xv[-1] + (xv[-1] - xv[-2])
inds = np.argwhere(xv <= t)
if len(inds) < 1:
return (0, t)
ind = inds[-1, 0]
return ind, t
|
def timeIndex(self, slider):
## Return the time and frame index indicated by a slider
if self.image is None:
return (0, 0)
t = slider.value()
xv = self.tVals
if xv is None:
ind = int(t)
else:
if len(xv) < 2:
return (0, 0)
totTime = xv[-1] + (xv[-1] - xv[-2])
inds = np.argwhere(xv <= t)
if len(inds) < 1:
return (0, t)
ind = inds[-1, 0]
return ind, t
|
https://github.com/pyqtgraph/pyqtgraph/issues/1545
|
Traceback (most recent call last):
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 452, in keyPressEvent
self.evalKeyState()
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 476, in evalKeyState
self.jumpFrames(1)
File "D:\Repositories\Art\pyqtgraph\pyqtgraph\imageview\ImageView.py", line 516, in jumpFrames
if self.axes['t'] is not None:
KeyError: 't'
|
KeyError
|
def roiChanged(self):
# Extract image data from ROI
if self.image is None:
return
image = self.getProcessedImage()
# getArrayRegion axes should be (x, y) of data array for col-major,
# (y, x) for row-major
# can't just transpose input because ROI is axisOrder aware
colmaj = self.imageItem.axisOrder == "col-major"
if colmaj:
axes = (self.axes["x"], self.axes["y"])
else:
axes = (self.axes["y"], self.axes["x"])
data, coords = self.roi.getArrayRegion(
image.view(np.ndarray), img=self.imageItem, axes=axes, returnMappedCoords=True
)
if data is None:
return
# Convert extracted data into 1D plot data
if self.axes["t"] is None:
# Average across y-axis of ROI
data = data.mean(axis=self.axes["y"])
# get coordinates along x axis of ROI mapped to range (0, roiwidth)
if colmaj:
coords = coords[:, :, 0] - coords[:, 0:1, 0]
else:
coords = coords[:, 0, :] - coords[:, 0, 0:1]
xvals = (coords**2).sum(axis=0) ** 0.5
else:
# Average data within entire ROI for each frame
data = data.mean(axis=axes)
xvals = self.tVals
# Handle multi-channel data
if data.ndim == 1:
plots = [(xvals, data, "w")]
if data.ndim == 2:
if data.shape[1] == 1:
colors = "w"
else:
colors = "rgbw"
plots = []
for i in range(data.shape[1]):
d = data[:, i]
plots.append((xvals, d, colors[i]))
# Update plot line(s)
while len(plots) < len(self.roiCurves):
c = self.roiCurves.pop()
c.scene().removeItem(c)
while len(plots) > len(self.roiCurves):
self.roiCurves.append(self.ui.roiPlot.plot())
for i in range(len(plots)):
x, y, p = plots[i]
self.roiCurves[i].setData(x, y, pen=p)
|
def roiChanged(self):
if self.image is None:
return
image = self.getProcessedImage()
# Extract image data from ROI
axes = (self.axes["x"], self.axes["y"])
data, coords = self.roi.getArrayRegion(
image.view(np.ndarray), self.imageItem, returnMappedCoords=True
)
if data is None:
return
# Convert extracted data into 1D plot data
if self.axes["t"] is None:
# Average across y-axis of ROI
data = data.mean(axis=axes[1])
if (
axes == (1, 0)
): ## we're in row-major order mode -- there's probably a better way to do this slicing dynamically, but I've not figured it out yet.
coords = coords[:, 0, :] - coords[:, 0, 0:1]
else: # default to old way
coords = coords[:, :, 0] - coords[:, 0:1, 0]
xvals = (coords**2).sum(axis=0) ** 0.5
else:
# Average data within entire ROI for each frame
data = data.mean(axis=max(axes)).mean(axis=min(axes))
xvals = self.tVals
# Handle multi-channel data
if data.ndim == 1:
plots = [(xvals, data, "w")]
if data.ndim == 2:
if data.shape[1] == 1:
colors = "w"
else:
colors = "rgbw"
plots = []
for i in range(data.shape[1]):
d = data[:, i]
plots.append((xvals, d, colors[i]))
# Update plot line(s)
while len(plots) < len(self.roiCurves):
c = self.roiCurves.pop()
c.scene().removeItem(c)
while len(plots) > len(self.roiCurves):
self.roiCurves.append(self.ui.roiPlot.plot())
for i in range(len(plots)):
x, y, p = plots[i]
self.roiCurves[i].setData(x, y, pen=p)
|
https://github.com/pyqtgraph/pyqtgraph/issues/1280
|
Traceback (most recent call last):
File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/imageview/ImageView.py", line 566, in roiClicked
self.roiChanged()
File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/imageview/ImageView.py", line 640, in roiChanged
self.roiCurves[i].setData(x, y, pen=p)
File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotDataItem.py", line 471, in setData
self.updateItems()
File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotDataItem.py", line 497, in updateItems
self.curve.setData(x=x, y=y, **curveArgs)
File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 335, in setData
self.updateData(*args, **kargs)
File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 385, in updateData
raise Exception("X and Y arrays must be the same shape--got %s and %s." % (self.xData.shape, self.yData.shape))
Exception: X and Y arrays must be the same shape--got (100,) and (10,).
[22:13:37] Ignored exception:
|==============================>>
| Traceback (most recent call last):
| File "/Users/ogi/Developer/pyqtgraph/examples/ImageView.py", line 70, in <module>
| QtGui.QApplication.instance().exec_()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/widgets/GraphicsView.py", line 155, in paintEvent
| return QtGui.QGraphicsView.paintEvent(self, ev)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 93, in w
| printExc('Ignored exception:')
| --- exception caught here ---
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 91, in w
| func(*args, **kwds)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 464, in paint
| path = self.getPath()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 446, in getPath
| self.path = self.generatePath(*self.getData())
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 435, in generatePath
| path = fn.arrayToQPath(x, y, connect=self.opts['connect'])
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/functions.py", line 1496, in arrayToQPath
| arr[1:-1]['y'] = y
| ValueError: could not broadcast input array from shape (10) into shape (100)
|==============================<<
[22:13:37] Ignored exception:
|==============================>>
| Traceback (most recent call last):
| File "/Users/ogi/Developer/pyqtgraph/examples/ImageView.py", line 70, in <module>
| QtGui.QApplication.instance().exec_()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/widgets/GraphicsView.py", line 155, in paintEvent
| return QtGui.QGraphicsView.paintEvent(self, ev)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 93, in w
| printExc('Ignored exception:')
| --- exception caught here ---
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 91, in w
| func(*args, **kwds)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 464, in paint
| path = self.getPath()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 446, in getPath
| self.path = self.generatePath(*self.getData())
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 435, in generatePath
| path = fn.arrayToQPath(x, y, connect=self.opts['connect'])
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/functions.py", line 1496, in arrayToQPath
| arr[1:-1]['y'] = y
| ValueError: could not broadcast input array from shape (10) into shape (100)
|==============================<<
[22:13:38] Ignored exception:
|==============================>>
| Traceback (most recent call last):
| File "/Users/ogi/Developer/pyqtgraph/examples/ImageView.py", line 70, in <module>
| QtGui.QApplication.instance().exec_()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/widgets/GraphicsView.py", line 155, in paintEvent
| return QtGui.QGraphicsView.paintEvent(self, ev)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 93, in w
| printExc('Ignored exception:')
| --- exception caught here ---
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 91, in w
| func(*args, **kwds)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 464, in paint
| path = self.getPath()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 446, in getPath
| self.path = self.generatePath(*self.getData())
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 435, in generatePath
| path = fn.arrayToQPath(x, y, connect=self.opts['connect'])
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/functions.py", line 1496, in arrayToQPath
| arr[1:-1]['y'] = y
| ValueError: could not broadcast input array from shape (10) into shape (100)
|==============================<<
[22:13:38] Ignored exception:
|==============================>>
| Traceback (most recent call last):
| File "/Users/ogi/Developer/pyqtgraph/examples/ImageView.py", line 70, in <module>
| QtGui.QApplication.instance().exec_()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/widgets/GraphicsView.py", line 155, in paintEvent
| return QtGui.QGraphicsView.paintEvent(self, ev)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 93, in w
| printExc('Ignored exception:')
| --- exception caught here ---
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/debug.py", line 91, in w
| func(*args, **kwds)
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 464, in paint
| path = self.getPath()
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 446, in getPath
| self.path = self.generatePath(*self.getData())
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/graphicsItems/PlotCurveItem.py", line 435, in generatePath
| path = fn.arrayToQPath(x, y, connect=self.opts['connect'])
| File "/Users/ogi/Developer/pyqtgraph/pyqtgraph/functions.py", line 1496, in arrayToQPath
| arr[1:-1]['y'] = y
| ValueError: could not broadcast input array from shape (10) into shape (100)
|==============================<<
|
Exception
|
def setParentItem(self, p):
ret = GraphicsWidget.setParentItem(self, p)
if self.opts["offset"] is not None:
offset = Point(self.opts["offset"])
anchorx = 1 if offset[0] <= 0 else 0
anchory = 1 if offset[1] <= 0 else 0
anchor = (anchorx, anchory)
self.anchor(itemPos=anchor, parentPos=anchor, offset=offset)
return ret
|
def setParentItem(self, p):
ret = GraphicsWidget.setParentItem(self, p)
if self.offset is not None:
offset = Point(self.opts["offset"])
anchorx = 1 if offset[0] <= 0 else 0
anchory = 1 if offset[1] <= 0 else 0
anchor = (anchorx, anchory)
self.anchor(itemPos=anchor, parentPos=anchor, offset=offset)
return ret
|
https://github.com/pyqtgraph/pyqtgraph/issues/1094
|
Traceback (most recent call last):
File "/home/vin/test/testlegend.py", line 7, in <module>
l.setParentItem(plt.graphicsItem())
File "/home/vin/miniconda3/lib/python3.7/site-packages/pyqtgraph/graphicsItems/LegendItem.py", line 128, in setParentItem
offset = Point(self.opts['offset'])
File "/home/vin/miniconda3/lib/python3.7/site-packages/pyqtgraph/Point.py", line 35, in __init__
QtCore.QPointF.__init__(self, *args)
TypeError: arguments did not match any overloaded call:
QPointF(): too many arguments
QPointF(float, float): argument 1 has unexpected type 'NoneType'
QPointF(QPoint): argument 1 has unexpected type 'NoneType'
QPointF(QPointF): argument 1 has unexpected type 'NoneType'
|
TypeError
|
def checkOpenGLVersion(self, msg):
## Only to be called from within exception handler.
ver = glGetString(GL_VERSION).split()[0]
if int(ver.split(b".")[0]) < 2:
from .. import debug
debug.printExc()
raise Exception(
msg
+ " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue."
% ver
)
else:
raise
|
def checkOpenGLVersion(self, msg):
## Only to be called from within exception handler.
ver = glGetString(GL_VERSION).split()[0]
if int(ver.split(".")[0]) < 2:
from .. import debug
debug.printExc()
raise Exception(
msg
+ " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue."
% ver
)
else:
raise
|
https://github.com/pyqtgraph/pyqtgraph/issues/261
|
Using PyQt5 (default graphics system)
Traceback (most recent call last):
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 55, in addItem
item.initializeGL()
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\pyqtgraph\opengl\items\GLImageItem.py", line 34, in initializ
eGL
glEnable(GL_TEXTURE_2D)
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\OpenGL\platform\baseplatform.py", line 402, in __call__
return self( *args, **named )
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\OpenGL\error.py", line 232, in glCheckError
baseOperation = baseOperation,
OpenGL.error.GLError: GLError(
err = 1280,
description = b'\xe9num\xe9rant non valide',
baseOperation = glEnable,
cArguments = (GL_TEXTURE_2D,)
)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\pyqtgraph\examples\GLImageItem.py", line 41, in <module>
w.addItem(v1)
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 57, in addItem
self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' % str(
item))
File "D:\WinPython\basedir34\buildQt5\winpython-3.4.4.amd64\python-3.4.4.amd64
\lib\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 382, in checkOpenGLVe
rsion
if int(ver.split('.')[0]) < 2:
TypeError: 'str' does not support the buffer interface
|
OpenGL.error.GLError
|
def __init__(self, pos, color, mode=None):
"""
=============== ==============================================================
**Arguments:**
pos Array of positions where each color is defined
color Array of colors.
Values are interpreted via
:func:`mkColor() <pyqtgraph.mkColor>`.
mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
indicating the color space that should be used when
interpolating between stops. Note that the last mode value is
ignored. By default, the mode is entirely RGB.
=============== ==============================================================
"""
self.pos = np.array(pos)
order = np.argsort(self.pos)
self.pos = self.pos[order]
self.color = np.apply_along_axis(
func1d=lambda x: mkColor(x).getRgb(),
axis=-1,
arr=color,
)[order]
if mode is None:
mode = np.ones(len(pos))
self.mode = mode
self.stopsCache = {}
|
def __init__(self, pos, color, mode=None):
"""
=============== ==============================================================
**Arguments:**
pos Array of positions where each color is defined
color Array of RGBA colors.
Integer data types are interpreted as 0-255; float data types
are interpreted as 0.0-1.0
mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
indicating the color space that should be used when
interpolating between stops. Note that the last mode value is
ignored. By default, the mode is entirely RGB.
=============== ==============================================================
"""
self.pos = np.array(pos)
order = np.argsort(self.pos)
self.pos = self.pos[order]
self.color = np.array(color)[order]
if mode is None:
mode = np.ones(len(pos))
self.mode = mode
self.stopsCache = {}
|
https://github.com/pyqtgraph/pyqtgraph/issues/1009
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
cmap.getLookupTable() # Does not work
File "...\venv\lib\site-packages\pyqtgraph\colormap.py", line 223, in getLookupTable
alpha = self.usesAlpha()
File "...\venv\lib\site-packages\pyqtgraph\colormap.py", line 236, in usesAlpha
return np.any(self.color[:,3] != max)
IndexError: index 3 is out of bounds for axis 1 with size 3
|
IndexError
|
def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode="byte"):
"""
Return an RGB(A) lookup table (ndarray).
=============== =============================================================================
**Arguments:**
start The starting value in the lookup table (default=0.0)
stop The final value in the lookup table (default=1.0)
nPts The number of points in the returned lookup table.
alpha True, False, or None - Specifies whether or not alpha values are included
in the table. If alpha is None, it will be automatically determined.
mode Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.
See :func:`map() <pyqtgraph.ColorMap.map>`.
=============== =============================================================================
"""
if isinstance(mode, basestring):
mode = self.enumMap[mode.lower()]
if alpha is None:
alpha = self.usesAlpha()
x = np.linspace(start, stop, nPts)
table = self.map(x, mode)
if not alpha and mode != self.QCOLOR:
return table[:, :3]
else:
return table
|
def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode="byte"):
"""
Return an RGB(A) lookup table (ndarray).
=============== =============================================================================
**Arguments:**
start The starting value in the lookup table (default=0.0)
stop The final value in the lookup table (default=1.0)
nPts The number of points in the returned lookup table.
alpha True, False, or None - Specifies whether or not alpha values are included
in the table. If alpha is None, it will be automatically determined.
mode Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.
See :func:`map() <pyqtgraph.ColorMap.map>`.
=============== =============================================================================
"""
if isinstance(mode, basestring):
mode = self.enumMap[mode.lower()]
if alpha is None:
alpha = self.usesAlpha()
x = np.linspace(start, stop, nPts)
table = self.map(x, mode)
if not alpha:
return table[:, :3]
else:
return table
|
https://github.com/pyqtgraph/pyqtgraph/issues/1009
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
cmap.getLookupTable() # Does not work
File "...\venv\lib\site-packages\pyqtgraph\colormap.py", line 223, in getLookupTable
alpha = self.usesAlpha()
File "...\venv\lib\site-packages\pyqtgraph\colormap.py", line 236, in usesAlpha
return np.any(self.color[:,3] != max)
IndexError: index 3 is out of bounds for axis 1 with size 3
|
IndexError
|
def __init__(
self,
region=None,
host=None,
session_cls=None,
request_timeout_seconds=None,
max_retry_attempts=None,
base_backoff_ms=None,
):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value("region")
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value("session_cls")
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value("request_timeout_seconds")
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value("max_retry_attempts")
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value("base_backoff_ms")
|
def __init__(
self,
region=None,
host=None,
session_cls=None,
request_timeout_seconds=None,
max_retry_attempts=None,
base_backoff_ms=None,
):
self._tables = {}
self.host = host
self._session = None
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value("region")
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value("session_cls")
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value("request_timeout_seconds")
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value("max_retry_attempts")
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value("base_backoff_ms")
|
https://github.com/pynamodb/PynamoDB/issues/153
|
Traceback (most recent call last):
File "./instrumentation/utils.py", line 69, in save_driver_log
driver_log.save()
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/models.py", line 379, in save
data = self._get_connection().put_item(*args, **kwargs)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/table.py", line 83, in put_item
return_item_collection_metrics=return_item_collection_metrics)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 806, in put_item
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 648, in get_identifier_map
tbl = self.get_meta_table(table_name)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 399, in get_meta_table
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 235, in dispatch
data = self._make_api_call(operation_name, operation_kwargs)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 250, in _make_api_call
operation_model = self.client._service_model.operation_model(operation_name)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 387, in client
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
File "/app/.heroku/python/lib/python2.7/site-packages/botocore/session.py", line 809, in create_client
endpoint_resolver = self.get_component('endpoint_resolver')
File "/app/.heroku/python/lib/python2.7/site-packages/botocore/session.py", line 690, in get_component
return self._components.get_component(name)
File "/app/.heroku/python/lib/python2.7/site-packages/botocore/session.py", line 875, in get_component
del self._deferred[name]
KeyError: 'endpoint_resolver'
|
KeyError
|
def session(self):
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, "session", None) is None:
self._local.session = get_session()
return self._local.session
|
def session(self):
"""
Returns a valid botocore session
"""
if self._session is None:
self._session = get_session()
return self._session
|
https://github.com/pynamodb/PynamoDB/issues/153
|
Traceback (most recent call last):
File "./instrumentation/utils.py", line 69, in save_driver_log
driver_log.save()
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/models.py", line 379, in save
data = self._get_connection().put_item(*args, **kwargs)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/table.py", line 83, in put_item
return_item_collection_metrics=return_item_collection_metrics)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 806, in put_item
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 648, in get_identifier_map
tbl = self.get_meta_table(table_name)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 399, in get_meta_table
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 235, in dispatch
data = self._make_api_call(operation_name, operation_kwargs)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 250, in _make_api_call
operation_model = self.client._service_model.operation_model(operation_name)
File "/app/.heroku/python/lib/python2.7/site-packages/pynamodb/connection/base.py", line 387, in client
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
File "/app/.heroku/python/lib/python2.7/site-packages/botocore/session.py", line 809, in create_client
endpoint_resolver = self.get_component('endpoint_resolver')
File "/app/.heroku/python/lib/python2.7/site-packages/botocore/session.py", line 690, in get_component
return self._components.get_component(name)
File "/app/.heroku/python/lib/python2.7/site-packages/botocore/session.py", line 875, in get_component
del self._deferred[name]
KeyError: 'endpoint_resolver'
|
KeyError
|
def update_item(
self,
attribute,
value=None,
action=None,
condition=None,
conditional_operator=None,
**expected_values,
):
"""
Updates an item using the UpdateItem operation.
This should be used for updating a single attribute of an item.
:param attribute: The name of the attribute to be updated
:param value: The new value for the attribute.
:param action: The action to take if this item already exists.
See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdate
"""
warnings.warn("`Model.update_item` is deprecated in favour of `Model.update` now")
self._conditional_operator_check(conditional_operator)
args, save_kwargs = self._get_save_args(null_check=False)
attribute_cls = None
for attr_name, attr_cls in self._get_attributes().items():
if attr_name == attribute:
attribute_cls = attr_cls
break
if not attribute_cls:
raise ValueError("Attribute {0} specified does not exist".format(attr_name))
if save_kwargs.get(pythonic(RANGE_KEY)):
kwargs = {pythonic(RANGE_KEY): save_kwargs.get(pythonic(RANGE_KEY))}
else:
kwargs = {}
if len(expected_values):
kwargs.update(
expected=self._build_expected_values(
expected_values, UPDATE_FILTER_OPERATOR_MAP
)
)
kwargs[pythonic(ATTR_UPDATES)] = {
attribute_cls.attr_name: {
ACTION: action.upper() if action else None,
}
}
if value is not None:
kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = {
ATTR_TYPE_MAP[attribute_cls.attr_type]: attribute_cls.serialize(value)
}
kwargs[pythonic(RETURN_VALUES)] = ALL_NEW
kwargs.update(conditional_operator=conditional_operator)
kwargs.update(condition=condition)
data = self._get_connection().update_item(*args, **kwargs)
self._throttle.add_record(data.get(CONSUMED_CAPACITY))
for name, value in data.get(ATTRIBUTES).items():
attr_name = self._dynamo_to_python_attr(name)
attr = self._get_attributes().get(attr_name)
if attr:
setattr(
self,
attr_name,
attr.deserialize(value.get(ATTR_TYPE_MAP[attr.attr_type])),
)
return data
|
def update_item(
self,
attribute,
value=None,
action=None,
condition=None,
conditional_operator=None,
**expected_values,
):
"""
Updates an item using the UpdateItem operation.
This should be used for updating a single attribute of an item.
:param attribute: The name of the attribute to be updated
:param value: The new value for the attribute.
:param action: The action to take if this item already exists.
See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdate
"""
warnings.warn("`Model.update_item` is deprecated in favour of `Model.update` now")
self._conditional_operator_check(conditional_operator)
args, save_kwargs = self._get_save_args(null_check=False)
attribute_cls = None
for attr_name, attr_cls in self._get_attributes().items():
if attr_name == attribute:
value = attr_cls.serialize(value)
attribute_cls = attr_cls
break
if not attribute_cls:
raise ValueError("Attribute {0} specified does not exist".format(attr_name))
if save_kwargs.get(pythonic(RANGE_KEY)):
kwargs = {pythonic(RANGE_KEY): save_kwargs.get(pythonic(RANGE_KEY))}
else:
kwargs = {}
if len(expected_values):
kwargs.update(
expected=self._build_expected_values(
expected_values, UPDATE_FILTER_OPERATOR_MAP
)
)
kwargs[pythonic(ATTR_UPDATES)] = {
attribute_cls.attr_name: {
ACTION: action.upper() if action else None,
}
}
if action is not None and action.upper() != DELETE:
kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name][VALUE] = {
ATTR_TYPE_MAP[attribute_cls.attr_type]: value
}
kwargs[pythonic(RETURN_VALUES)] = ALL_NEW
kwargs.update(conditional_operator=conditional_operator)
kwargs.update(condition=condition)
data = self._get_connection().update_item(*args, **kwargs)
self._throttle.add_record(data.get(CONSUMED_CAPACITY))
for name, value in data.get(ATTRIBUTES).items():
attr_name = self._dynamo_to_python_attr(name)
attr = self._get_attributes().get(attr_name)
if attr:
setattr(
self,
attr_name,
attr.deserialize(value.get(ATTR_TYPE_MAP[attr.attr_type])),
)
return data
|
https://github.com/pynamodb/PynamoDB/issues/132
|
original nicknames: {'name2', 'name3', 'name1'}
nicknames to remove: {'name2'}
expected result nicknames: {'name3', 'name1'}
actual result nicknames: None
Traceback (most recent call last):
File "mytest.py", line 34, in <module>
assert expected_result_nicknames == user.nicknames
AssertionError
|
AssertionError
|
def update(
self,
attributes=None,
actions=None,
condition=None,
conditional_operator=None,
**expected_values,
):
"""
Updates an item using the UpdateItem operation.
:param attributes: A dictionary of attributes to update in the following format
{
attr_name: {'value': 10, 'action': 'ADD'},
next_attr: {'value': True, 'action': 'PUT'},
}
"""
if attributes is not None and not isinstance(attributes, dict):
raise TypeError("the value of `attributes` is expected to be a dictionary")
if actions is not None and not isinstance(actions, list):
raise TypeError("the value of `actions` is expected to be a list")
self._conditional_operator_check(conditional_operator)
args, save_kwargs = self._get_save_args(null_check=False)
kwargs = {
pythonic(RETURN_VALUES): ALL_NEW,
"conditional_operator": conditional_operator,
}
if attributes:
kwargs[pythonic(ATTR_UPDATES)] = {}
if pythonic(RANGE_KEY) in save_kwargs:
kwargs[pythonic(RANGE_KEY)] = save_kwargs[pythonic(RANGE_KEY)]
if expected_values:
kwargs["expected"] = self._build_expected_values(
expected_values, UPDATE_FILTER_OPERATOR_MAP
)
attrs = self._get_attributes()
attributes = attributes or {}
for attr, params in attributes.items():
attribute_cls = attrs[attr]
action = params["action"] and params["action"].upper()
attr_values = {ACTION: action}
if "value" in params:
attr_values[VALUE] = self._serialize_value(attribute_cls, params["value"])
kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name] = attr_values
kwargs.update(condition=condition)
kwargs.update(actions=actions)
data = self._get_connection().update_item(*args, **kwargs)
self._throttle.add_record(data.get(CONSUMED_CAPACITY))
for name, value in data[ATTRIBUTES].items():
attr_name = self._dynamo_to_python_attr(name)
attr = self._get_attributes().get(attr_name)
if attr:
setattr(
self,
attr_name,
attr.deserialize(value.get(ATTR_TYPE_MAP[attr.attr_type])),
)
return data
|
def update(
self,
attributes=None,
actions=None,
condition=None,
conditional_operator=None,
**expected_values,
):
"""
Updates an item using the UpdateItem operation.
:param attributes: A dictionary of attributes to update in the following format
{
attr_name: {'value': 10, 'action': 'ADD'},
next_attr: {'value': True, 'action': 'PUT'},
}
"""
if attributes is not None and not isinstance(attributes, dict):
raise TypeError("the value of `attributes` is expected to be a dictionary")
if actions is not None and not isinstance(actions, list):
raise TypeError("the value of `actions` is expected to be a list")
self._conditional_operator_check(conditional_operator)
args, save_kwargs = self._get_save_args(null_check=False)
kwargs = {
pythonic(RETURN_VALUES): ALL_NEW,
"conditional_operator": conditional_operator,
}
if attributes:
kwargs[pythonic(ATTR_UPDATES)] = {}
if pythonic(RANGE_KEY) in save_kwargs:
kwargs[pythonic(RANGE_KEY)] = save_kwargs[pythonic(RANGE_KEY)]
if expected_values:
kwargs["expected"] = self._build_expected_values(
expected_values, UPDATE_FILTER_OPERATOR_MAP
)
attrs = self._get_attributes()
attributes = attributes or {}
for attr, params in attributes.items():
attribute_cls = attrs[attr]
action = params["action"] and params["action"].upper()
attr_values = {ACTION: action}
if action != DELETE:
attr_values[VALUE] = self._serialize_value(attribute_cls, params["value"])
kwargs[pythonic(ATTR_UPDATES)][attribute_cls.attr_name] = attr_values
kwargs.update(condition=condition)
kwargs.update(actions=actions)
data = self._get_connection().update_item(*args, **kwargs)
self._throttle.add_record(data.get(CONSUMED_CAPACITY))
for name, value in data[ATTRIBUTES].items():
attr_name = self._dynamo_to_python_attr(name)
attr = self._get_attributes().get(attr_name)
if attr:
setattr(
self,
attr_name,
attr.deserialize(value.get(ATTR_TYPE_MAP[attr.attr_type])),
)
return data
|
https://github.com/pynamodb/PynamoDB/issues/132
|
original nicknames: {'name2', 'name3', 'name1'}
nicknames to remove: {'name2'}
expected result nicknames: {'name3', 'name1'}
actual result nicknames: None
Traceback (most recent call last):
File "mytest.py", line 34, in <module>
assert expected_result_nicknames == user.nicknames
AssertionError
|
AssertionError
|
def _get_nearest_labels_for(self, labels):
already_sampled_negative_labels = set()
for label in labels:
plausible_labels = []
plausible_label_probabilities = []
for plausible_label in self.label_nearest_map[label]:
if plausible_label in already_sampled_negative_labels:
continue
else:
plausible_labels.append(plausible_label)
plausible_label_probabilities.append(
self.label_nearest_map[label][plausible_label]
)
# make sure the probabilities always sum up to 1
plausible_label_probabilities = np.array(
plausible_label_probabilities, dtype="float64"
)
plausible_label_probabilities += 1e-08
plausible_label_probabilities /= np.sum(plausible_label_probabilities)
if len(plausible_labels) > 0:
num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
sampled_negative_labels = np.random.choice(
plausible_labels,
num_samples,
replace=False,
p=plausible_label_probabilities,
)
already_sampled_negative_labels.update(sampled_negative_labels)
return already_sampled_negative_labels
|
def _get_nearest_labels_for(self, labels):
already_sampled_negative_labels = set()
for label in labels:
plausible_labels = []
plausible_label_probabilities = []
for plausible_label in self.label_nearest_map[label]:
if plausible_label in already_sampled_negative_labels:
continue
else:
plausible_labels.append(plausible_label)
plausible_label_probabilities.append(
self.label_nearest_map[label][plausible_label]
)
plausible_label_probabilities /= np.sum(plausible_label_probabilities) + 1e-08
if len(plausible_labels) > 0:
num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
sampled_negative_labels = np.random.choice(
plausible_labels,
num_samples,
replace=False,
p=plausible_label_probabilities,
)
already_sampled_negative_labels.update(sampled_negative_labels)
return already_sampled_negative_labels
|
https://github.com/flairNLP/flair/issues/1925
|
Traceback (most recent call last):
File "/home/khalder/prod_stuff/latest_flair/flair/flair/trainers/trainer.py", line 371, in train
loss = self.model.forward_loss(batch_step)
File "/home/khalder/prod_stuff/latest_flair/flair/flair/models/text_classification_model.py", line 793, in forward_loss
sentences = self._get_tars_formatted_sentences(data_points)
File "/home/khalder/prod_stuff/latest_flair/flair/flair/models/text_classification_model.py", line 735, in _get_tars_formatted_sentences
sampled_negative_labels = self._get_nearest_labels_for(positive_labels)
File "/home/khalder/prod_stuff/latest_flair/flair/flair/models/text_classification_model.py", line 708, in _get_nearest_labels_for
p=plausible_label_probabilities)
File "mtrand.pyx", line 924, in numpy.random.mtrand.RandomState.choice
ValueError: probabilities do not sum to 1
|
ValueError
|
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
batch_size: int = 1,
use_scalar_mix: bool = False,
fine_tune: bool = False,
allow_long_sentences: bool = True,
**kwargs,
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param pooling_operation: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
:param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer
models tend to be huge.
:param use_scalar_mix: If True, uses a scalar mix of layers as embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
self.allow_long_sentences = allow_long_sentences
if allow_long_sentences:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2
else:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = 0
# model name
self.name = "transformer-word-" + str(model)
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == "all":
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[
-1
]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
# self.mix = ScalarMix(mixture_size=len(self.layer_indexes), trainable=False)
self.pooling_operation = pooling_operation
self.use_scalar_mix = use_scalar_mix
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
self.batch_size = batch_size
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
# most models have an intial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = 1
if type(self.tokenizer) == XLNetTokenizer:
self.begin_offset = 0
if type(self.tokenizer) == T5Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == GPT2Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == TransfoXLTokenizer:
self.begin_offset = 0
|
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
batch_size: int = 1,
use_scalar_mix: bool = False,
fine_tune: bool = False,
allow_long_sentences: bool = True,
**kwargs,
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param pooling_operation: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
:param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer
models tend to be huge.
:param use_scalar_mix: If True, uses a scalar mix of layers as embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
self.allow_long_sentences = allow_long_sentences
if allow_long_sentences:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2
else:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = 0
# model name
self.name = "transformer-word-" + str(model)
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == "all":
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[
-1
]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
# self.mix = ScalarMix(mixture_size=len(self.layer_indexes), trainable=False)
self.pooling_operation = pooling_operation
self.use_scalar_mix = use_scalar_mix
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
self.batch_size = batch_size
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
# most models have an intial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = 1
if type(self.tokenizer) == XLNetTokenizer:
self.begin_offset = 0
if type(self.tokenizer) == T5Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == GPT2Tokenizer:
self.begin_offset = 0
|
https://github.com/flairNLP/flair/issues/2008
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
model.embed(Sentence("tiago"))
File "/home/tiago/.local/lib/python3.8/site-packages/flair/embeddings/base.py", line 60, in embed
self._add_embeddings_internal(sentences)
File "/home/tiago/.local/lib/python3.8/site-packages/flair/embeddings/token.py", line 861, in _add_embeddings_internal
self._add_embeddings_to_sentences(batch)
File "/home/tiago/.local/lib/python3.8/site-packages/flair/embeddings/token.py", line 972, in _add_embeddings_to_sentences
hidden_states = self.model(input_ids, attention_mask=mask)[-1]
File "/home/tiago/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() got an unexpected keyword argument 'attention_mask'
|
TypeError
|
def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
"""Match subtokenization to Flair tokenization and extract embeddings from transformers for each token."""
# first, subtokenize each sentence and find out into how many subtokens each token was divided
subtokenized_sentences = []
subtokenized_sentences_token_lengths = []
sentence_parts_lengths = []
# TODO: keep for backwards compatibility, but remove in future
# some pretrained models do not have this property, applying default settings now.
# can be set manually after loading the model.
if not hasattr(self, "max_subtokens_sequence_length"):
self.max_subtokens_sequence_length = None
self.allow_long_sentences = False
self.stride = 0
non_empty_sentences = []
empty_sentences = []
for sentence in sentences:
tokenized_string = sentence.to_tokenized_string()
# method 1: subtokenize sentence
# subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)
# method 2:
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
if len(subtokenized_sentence) == 0:
empty_sentences.append(sentence)
continue
else:
non_empty_sentences.append(sentence)
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(
sentence, subtokenized_sentence
)
subtokenized_sentences_token_lengths.append(token_subtoken_lengths)
subtoken_ids_sentence = self.tokenizer.convert_tokens_to_ids(
subtokenized_sentence
)
nr_sentence_parts = 0
while subtoken_ids_sentence:
nr_sentence_parts += 1
encoded_inputs = self.tokenizer.encode_plus(
subtoken_ids_sentence,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
truncation=True,
)
subtoken_ids_split_sentence = encoded_inputs["input_ids"]
subtokenized_sentences.append(
torch.tensor(subtoken_ids_split_sentence, dtype=torch.long)
)
if "overflowing_tokens" in encoded_inputs:
subtoken_ids_sentence = encoded_inputs["overflowing_tokens"]
else:
subtoken_ids_sentence = None
sentence_parts_lengths.append(nr_sentence_parts)
# empty sentences get zero embeddings
for sentence in empty_sentences:
for token in sentence:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
# only embed non-empty sentences and if there is at least one
sentences = non_empty_sentences
if len(sentences) == 0:
return
# find longest sentence in batch
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
total_sentence_parts = sum(sentence_parts_lengths)
# initialize batch tensors and mask
input_ids = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
mask = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
for s_id, sentence in enumerate(subtokenized_sentences):
sequence_length = len(sentence)
input_ids[s_id][:sequence_length] = sentence
mask[s_id][:sequence_length] = torch.ones(sequence_length)
# put encoded batch through transformer model to get all hidden states of all encoder layers
if type(self.tokenizer) == TransfoXLTokenizer:
hidden_states = self.model(input_ids)[-1]
else:
hidden_states = self.model(input_ids, attention_mask=mask)[
-1
] # make the tuple a tensor; makes working with it easier.
hidden_states = torch.stack(hidden_states)
sentence_idx_offset = 0
# gradients are enabled if fine-tuning is enabled
gradient_context = (
torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
)
with gradient_context:
# iterate over all subtokenized sentences
for sentence_idx, (sentence, subtoken_lengths, nr_sentence_parts) in enumerate(
zip(sentences, subtokenized_sentences_token_lengths, sentence_parts_lengths)
):
sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
for i in range(1, nr_sentence_parts):
sentence_idx_offset += 1
remainder_sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
# remove stride_size//2 at end of sentence_hidden_state, and half at beginning of remainder,
# in order to get some context into the embeddings of these words.
# also don't include the embedding of the extra [CLS] and [SEP] tokens.
sentence_hidden_state = torch.cat(
(
sentence_hidden_state[:, : -1 - self.stride // 2, :],
remainder_sentence_hidden_state[:, 1 + self.stride // 2 :, :],
),
1,
)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(
zip(sentence, subtoken_lengths)
):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = sentence_hidden_state[layer][
subword_start_idx:subword_end_idx
]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]]
)
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(
torch.cat(all_embeddings, dim=0), dim=0
)
subtoken_embeddings.append(final_embedding)
# use scalar mix of embeddings if so selected
if self.use_scalar_mix:
sm_embeddings = torch.mean(
torch.stack(subtoken_embeddings, dim=1), dim=1
)
# sm_embeddings = self.mix(subtoken_embeddings)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
|
def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
"""Match subtokenization to Flair tokenization and extract embeddings from transformers for each token."""
# first, subtokenize each sentence and find out into how many subtokens each token was divided
subtokenized_sentences = []
subtokenized_sentences_token_lengths = []
sentence_parts_lengths = []
# TODO: keep for backwards compatibility, but remove in future
# some pretrained models do not have this property, applying default settings now.
# can be set manually after loading the model.
if not hasattr(self, "max_subtokens_sequence_length"):
self.max_subtokens_sequence_length = None
self.allow_long_sentences = False
self.stride = 0
non_empty_sentences = []
empty_sentences = []
for sentence in sentences:
tokenized_string = sentence.to_tokenized_string()
# method 1: subtokenize sentence
# subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)
# method 2:
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
if len(subtokenized_sentence) == 0:
empty_sentences.append(sentence)
continue
else:
non_empty_sentences.append(sentence)
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(
sentence, subtokenized_sentence
)
subtokenized_sentences_token_lengths.append(token_subtoken_lengths)
subtoken_ids_sentence = self.tokenizer.convert_tokens_to_ids(
subtokenized_sentence
)
nr_sentence_parts = 0
while subtoken_ids_sentence:
nr_sentence_parts += 1
encoded_inputs = self.tokenizer.encode_plus(
subtoken_ids_sentence,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
truncation=True,
)
subtoken_ids_split_sentence = encoded_inputs["input_ids"]
subtokenized_sentences.append(
torch.tensor(subtoken_ids_split_sentence, dtype=torch.long)
)
if "overflowing_tokens" in encoded_inputs:
subtoken_ids_sentence = encoded_inputs["overflowing_tokens"]
else:
subtoken_ids_sentence = None
sentence_parts_lengths.append(nr_sentence_parts)
# empty sentences get zero embeddings
for sentence in empty_sentences:
for token in sentence:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
# only embed non-empty sentences and if there is at least one
sentences = non_empty_sentences
if len(sentences) == 0:
return
# find longest sentence in batch
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
total_sentence_parts = sum(sentence_parts_lengths)
# initialize batch tensors and mask
input_ids = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
mask = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
for s_id, sentence in enumerate(subtokenized_sentences):
sequence_length = len(sentence)
input_ids[s_id][:sequence_length] = sentence
mask[s_id][:sequence_length] = torch.ones(sequence_length)
# put encoded batch through transformer model to get all hidden states of all encoder layers
hidden_states = self.model(input_ids, attention_mask=mask)[-1]
# make the tuple a tensor; makes working with it easier.
hidden_states = torch.stack(hidden_states)
sentence_idx_offset = 0
# gradients are enabled if fine-tuning is enabled
gradient_context = (
torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
)
with gradient_context:
# iterate over all subtokenized sentences
for sentence_idx, (sentence, subtoken_lengths, nr_sentence_parts) in enumerate(
zip(sentences, subtokenized_sentences_token_lengths, sentence_parts_lengths)
):
sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
for i in range(1, nr_sentence_parts):
sentence_idx_offset += 1
remainder_sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
# remove stride_size//2 at end of sentence_hidden_state, and half at beginning of remainder,
# in order to get some context into the embeddings of these words.
# also don't include the embedding of the extra [CLS] and [SEP] tokens.
sentence_hidden_state = torch.cat(
(
sentence_hidden_state[:, : -1 - self.stride // 2, :],
remainder_sentence_hidden_state[:, 1 + self.stride // 2 :, :],
),
1,
)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(
zip(sentence, subtoken_lengths)
):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = sentence_hidden_state[layer][
subword_start_idx:subword_end_idx
]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]]
)
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(
torch.cat(all_embeddings, dim=0), dim=0
)
subtoken_embeddings.append(final_embedding)
# use scalar mix of embeddings if so selected
if self.use_scalar_mix:
sm_embeddings = torch.mean(
torch.stack(subtoken_embeddings, dim=1), dim=1
)
# sm_embeddings = self.mix(subtoken_embeddings)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
|
https://github.com/flairNLP/flair/issues/2008
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
model.embed(Sentence("tiago"))
File "/home/tiago/.local/lib/python3.8/site-packages/flair/embeddings/base.py", line 60, in embed
self._add_embeddings_internal(sentences)
File "/home/tiago/.local/lib/python3.8/site-packages/flair/embeddings/token.py", line 861, in _add_embeddings_internal
self._add_embeddings_to_sentences(batch)
File "/home/tiago/.local/lib/python3.8/site-packages/flair/embeddings/token.py", line 972, in _add_embeddings_to_sentences
hidden_states = self.model(input_ids, attention_mask=mask)[-1]
File "/home/tiago/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() got an unexpected keyword argument 'attention_mask'
|
TypeError
|
def _convert_lines_to_sentence(self, lines):
sentence: Sentence = Sentence()
for line in lines:
# skip comments
if self.comment_symbol is not None and line.startswith(self.comment_symbol):
continue
# if sentence ends, convert and return
if self.__line_completes_sentence(line):
if len(sentence) > 0:
if self.tag_to_bioes is not None:
sentence.convert_tag_scheme(
tag_type=self.tag_to_bioes, target_scheme="iobes"
)
# check if this sentence is a document boundary
if sentence.to_original_text() == self.document_separator_token:
sentence.is_document_boundary = True
return sentence
# otherwise, this line is a token. parse and add to sentence
else:
token = self._parse_token(line)
sentence.add_token(token)
# check if this sentence is a document boundary
if sentence.to_original_text() == self.document_separator_token:
sentence.is_document_boundary = True
if self.tag_to_bioes is not None:
sentence.convert_tag_scheme(tag_type=self.tag_to_bioes, target_scheme="iobes")
if len(sentence) > 0:
return sentence
|
def _convert_lines_to_sentence(self, lines):
sentence: Sentence = Sentence()
for line in lines:
# skip comments
if self.comment_symbol is not None and line.startswith(self.comment_symbol):
continue
# if sentence ends, convert and return
if self.__line_completes_sentence(line):
if len(sentence) > 0:
if self.tag_to_bioes is not None:
sentence.convert_tag_scheme(
tag_type=self.tag_to_bioes, target_scheme="iobes"
)
# check if this sentence is a document boundary
if sentence.to_original_text() == self.document_separator_token:
sentence.is_document_boundary = True
return sentence
# otherwise, this line is a token. parse and add to sentence
else:
token = self._parse_token(line)
sentence.add_token(token)
# check if this sentence is a document boundary
if sentence.to_original_text() == self.document_separator_token:
sentence.is_document_boundary = True
if len(sentence) > 0:
return sentence
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "all",
subtoken_pooling: str = "first",
layer_mean: bool = True,
fine_tune: bool = False,
allow_long_sentences: bool = True,
use_context: Union[bool, int] = False,
memory_effective_training: bool = True,
respect_document_boundaries: bool = True,
**kwargs,
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param subtoken_pooling: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
:param layer_mean: If True, uses a scalar mix of layers as embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
if not "config" in kwargs:
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
else:
self.model = AutoModel.from_pretrained(None, **kwargs)
self.allow_long_sentences = allow_long_sentences
if allow_long_sentences:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2
else:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = 0
# model name
self.name = "transformer-word-" + str(model)
self.base_model = str(model)
# whether to detach gradients on overlong sentences
self.memory_effective_training = memory_effective_training
# store whether to use context (and how much)
if type(use_context) == bool:
self.context_length: int = 64 if use_context else 0
if type(use_context) == int:
self.context_length: int = use_context
# if using context, can we cross document boundaries?
self.respect_document_boundaries = respect_document_boundaries
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == "all":
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[
-1
]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
self.pooling_operation = subtoken_pooling
self.layer_mean = layer_mean
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
# calculate embedding length
if not self.layer_mean:
length = len(self.layer_indexes) * self.model.config.hidden_size
else:
length = self.model.config.hidden_size
if self.pooling_operation == "first_last":
length *= 2
# return length
self.embedding_length_internal = length
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
# most models have an intial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = 1
if type(self.tokenizer) == XLNetTokenizer:
self.begin_offset = 0
if type(self.tokenizer) == T5Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == GPT2Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == TransfoXLTokenizer:
self.begin_offset = 0
|
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "all",
subtoken_pooling: str = "first",
layer_mean: bool = True,
fine_tune: bool = False,
allow_long_sentences: bool = True,
use_context: Union[bool, int] = False,
**kwargs,
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param subtoken_pooling: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
:param layer_mean: If True, uses a scalar mix of layers as embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
self.instance_parameters = self.get_instance_parameters(locals=locals())
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
self.allow_long_sentences = allow_long_sentences
if allow_long_sentences:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2
else:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = 0
# model name
self.name = "transformer-word-" + str(model)
# store whether to use context (and how much)
if type(use_context) == bool:
self.context_length: int = 64 if use_context else 0
if type(use_context) == int:
self.context_length: int = use_context
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == "all":
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[
-1
]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
self.pooling_operation = subtoken_pooling
self.layer_mean = layer_mean
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
# most models have an intial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = 1
if type(self.tokenizer) == XLNetTokenizer:
self.begin_offset = 0
if type(self.tokenizer) == T5Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == GPT2Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == TransfoXLTokenizer:
self.begin_offset = 0
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def _add_embeddings_to_sentence(self, sentence: Sentence):
"""Match subtokenization to Flair tokenization and extract embeddings from transformers for each token."""
# TODO: keep for backwards compatibility, but remove in future
# some pretrained models do not have this property, applying default settings now.
# can be set manually after loading the model.
if not hasattr(self, "max_subtokens_sequence_length"):
self.max_subtokens_sequence_length = None
self.allow_long_sentences = False
self.stride = 0
# if we also use context, first expand sentence to include context
if self.context_length > 0:
# in case of contextualization, we must remember non-expanded sentence
original_sentence = sentence
# create expanded sentence and remember context offsets
expanded_sentence, context_offset = self._expand_sentence_with_context(sentence)
# overwrite sentence with expanded sentence
sentence = expanded_sentence
# subtokenize the sentence
tokenized_string = sentence.to_tokenized_string()
# method 1: subtokenize sentence
# subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)
# method 2:
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
# set zero embeddings for empty sentences and return
if len(subtokenized_sentence) == 0:
for token in sentence:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
return
# determine into how many subtokens each token is split
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(
sentence, subtokenized_sentence
)
# if sentence is too long, will be split into multiple parts
sentence_splits = []
# check if transformer version 3 is used - in this case use old handling
import transformers
if transformers.__version__.startswith("3"):
# get sentence as list of subtoken ids
subtoken_ids_sentence = self.tokenizer.convert_tokens_to_ids(
subtokenized_sentence
)
while subtoken_ids_sentence:
encoded_inputs = self.tokenizer.encode_plus(
subtoken_ids_sentence,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
truncation=True,
)
sentence_splits.append(
torch.tensor(encoded_inputs["input_ids"], dtype=torch.long)
)
if "overflowing_tokens" in encoded_inputs:
subtoken_ids_sentence = encoded_inputs["overflowing_tokens"]
else:
subtoken_ids_sentence = None
# else if current transformer is used, use default handling
else:
encoded_inputs = self.tokenizer.encode_plus(
tokenized_string,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
truncation=True,
)
# overlong sentences are handled as multiple splits
for encoded_input in encoded_inputs["input_ids"]:
sentence_splits.append(torch.tensor(encoded_input, dtype=torch.long))
# embed each sentence split
hidden_states_of_all_splits = []
for split_number, sentence_split in enumerate(sentence_splits):
# initialize batch tensors and mask
input_ids = sentence_split.unsqueeze(0).to(flair.device)
# propagate gradients if fine-tuning and only during training
propagate_gradients = self.fine_tune and self.training
# increase memory effectiveness by skipping all but last sentence split
if (
propagate_gradients
and self.memory_effective_training
and split_number < len(sentence_splits) - 1
):
propagate_gradients = False
# put encoded batch through transformer model to get all hidden states of all encoder layers
if propagate_gradients:
hidden_states = self.model(input_ids)[
-1
] # make the tuple a tensor; makes working with it easier.
else:
with torch.no_grad(): # deactivate gradients if not necessary
hidden_states = self.model(input_ids)[-1]
# get hidden states as single tensor
split_hidden_state = torch.stack(hidden_states)[:, 0, ...]
hidden_states_of_all_splits.append(split_hidden_state)
# put splits back together into one tensor using overlapping strides
hidden_states = hidden_states_of_all_splits[0]
for i in range(1, len(hidden_states_of_all_splits)):
hidden_states = hidden_states[:, : -1 - self.stride // 2, :]
next_split = hidden_states_of_all_splits[i]
next_split = next_split[:, 1 + self.stride // 2 :, :]
hidden_states = torch.cat([hidden_states, next_split], 1)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(
zip(sentence, token_subtoken_lengths)
):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = hidden_states[layer][subword_start_idx:subword_end_idx]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]]
)
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(
torch.cat(all_embeddings, dim=0), dim=0
)
subtoken_embeddings.append(final_embedding)
# use layer mean of embeddings if so selected
if self.layer_mean and len(self.layer_indexes) > 1:
sm_embeddings = torch.mean(torch.stack(subtoken_embeddings, dim=1), dim=1)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
# move embeddings from context back to original sentence (if using context)
if self.context_length > 0:
for token_idx, token in enumerate(original_sentence):
token.set_embedding(
self.name, sentence[token_idx + context_offset].get_embedding(self.name)
)
sentence = original_sentence
|
def _add_embeddings_to_sentence(self, sentence: Sentence):
"""Match subtokenization to Flair tokenization and extract embeddings from transformers for each token."""
# TODO: keep for backwards compatibility, but remove in future
# some pretrained models do not have this property, applying default settings now.
# can be set manually after loading the model.
if not hasattr(self, "max_subtokens_sequence_length"):
self.max_subtokens_sequence_length = None
self.allow_long_sentences = False
self.stride = 0
# if we also use context, first expand sentence to include context
if self.context_length > 0:
# in case of contextualization, we must remember non-expanded sentence
original_sentence = sentence
# create expanded sentence and remember context offsets
expanded_sentence, context_offset = self._expand_sentence_with_context(sentence)
# overwrite sentence with expanded sentence
sentence = expanded_sentence
# subtokenize the sentence
tokenized_string = sentence.to_tokenized_string()
# method 1: subtokenize sentence
# subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)
# method 2:
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
# set zero embeddings for empty sentences and return
if len(subtokenized_sentence) == 0:
for token in sentence:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
return
# determine into how many subtokens each token is split
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(
sentence, subtokenized_sentence
)
# get sentence as list of subtoken ids
subtoken_ids_sentence = self.tokenizer.convert_tokens_to_ids(subtokenized_sentence)
# if sentence is too long, will be split into multiple parts
sentence_splits = []
while subtoken_ids_sentence:
encoded_inputs = self.tokenizer.encode_plus(
subtoken_ids_sentence,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
truncation=True,
)
sentence_splits.append(
torch.tensor(encoded_inputs["input_ids"], dtype=torch.long)
)
if "overflowing_tokens" in encoded_inputs:
subtoken_ids_sentence = encoded_inputs["overflowing_tokens"]
else:
subtoken_ids_sentence = None
# gradients are enabled if fine-tuning is enabled
gradient_context = (
torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
)
with gradient_context:
# embed each sentence split
hidden_states_of_all_splits = []
for sentence_split in sentence_splits:
# initialize batch tensors and mask
input_ids = sentence_split.unsqueeze(0).to(flair.device)
# put encoded batch through transformer model to get all hidden states of all encoder layers
hidden_states = self.model(input_ids)[
-1
] # make the tuple a tensor; makes working with it easier.
# get hidden states as single tensor
split_hidden_state = torch.stack(hidden_states)[:, 0, ...]
hidden_states_of_all_splits.append(split_hidden_state)
# put splits back together into one tensor using overlapping strides
hidden_states = hidden_states_of_all_splits[0]
for i in range(1, len(hidden_states_of_all_splits)):
hidden_states = hidden_states[:, : -1 - self.stride // 2, :]
next_split = hidden_states_of_all_splits[i]
next_split = next_split[:, 1 + self.stride // 2 :, :]
hidden_states = torch.cat([hidden_states, next_split], 1)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(
zip(sentence, token_subtoken_lengths)
):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = hidden_states[layer][
subword_start_idx:subword_end_idx
]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]]
)
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(
torch.cat(all_embeddings, dim=0), dim=0
)
subtoken_embeddings.append(final_embedding)
# use layer mean of embeddings if so selected
if self.layer_mean and len(self.layer_indexes) > 1:
sm_embeddings = torch.mean(
torch.stack(subtoken_embeddings, dim=1), dim=1
)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
# move embeddings from context back to original sentence (if using context)
if self.context_length > 0:
for token_idx, token in enumerate(original_sentence):
token.set_embedding(
self.name,
sentence[token_idx + context_offset].get_embedding(self.name),
)
sentence = original_sentence
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def _expand_sentence_with_context(self, sentence):
# remember original sentence
original_sentence = sentence
# get left context
left_context = ""
while True:
sentence = sentence.previous_sentence()
if sentence is None:
break
if self.respect_document_boundaries and sentence.is_document_boundary:
break
left_context = sentence.to_tokenized_string() + " " + left_context
left_context = left_context.strip()
if len(left_context.split(" ")) > self.context_length:
left_context = " ".join(left_context.split(" ")[-self.context_length :])
break
context_length = len(left_context.split(" "))
original_sentence.left_context = left_context
# get right context
sentence = original_sentence
right_context = ""
while True:
sentence = sentence.next_sentence()
if sentence is None:
break
if self.respect_document_boundaries and sentence.is_document_boundary:
break
right_context += " " + sentence.to_tokenized_string()
right_context = right_context.strip()
if len(right_context.split(" ")) > self.context_length:
right_context = " ".join(right_context.split(" ")[: self.context_length])
break
original_sentence.right_context = right_context
# make expanded sentence
expanded_sentence = Sentence()
expanded_sentence.tokens = [
Token(token)
for token in left_context.split(" ")
+ original_sentence.to_tokenized_string().split(" ")
+ right_context.split(" ")
]
return expanded_sentence, context_length
|
def _expand_sentence_with_context(self, sentence):
# remember original sentence
original_sentence = sentence
# get left context
left_context = ""
while True:
sentence = sentence.previous_sentence()
if sentence is None:
break
if sentence.is_document_boundary:
break
left_context = sentence.to_tokenized_string() + " " + left_context
left_context = left_context.strip()
if len(left_context.split(" ")) > self.context_length:
left_context = " ".join(left_context.split(" ")[-self.context_length :])
break
context_length = len(left_context.split(" "))
original_sentence.left_context = left_context
# get right context
sentence = original_sentence
right_context = ""
while True:
sentence = sentence.next_sentence()
if sentence is None:
break
if sentence.is_document_boundary:
break
right_context += " " + sentence.to_tokenized_string()
right_context = right_context.strip()
if len(right_context.split(" ")) > self.context_length:
right_context = " ".join(right_context.split(" ")[: self.context_length])
break
original_sentence.right_context = right_context
# make expanded sentence
expanded_sentence = Sentence()
expanded_sentence.tokens = [
Token(token)
for token in left_context.split(" ")
+ original_sentence.to_tokenized_string().split(" ")
+ right_context.split(" ")
]
return expanded_sentence, context_length
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def embedding_length(self) -> int:
if "embedding_length_internal" in self.__dict__.keys():
return self.embedding_length_internal
# """Returns the length of the embedding vector."""
if not self.layer_mean:
length = len(self.layer_indexes) * self.model.config.hidden_size
else:
length = self.model.config.hidden_size
if self.pooling_operation == "first_last":
length *= 2
self.__embedding_length = length
return length
|
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
if not self.layer_mean:
length = len(self.layer_indexes) * self.model.config.hidden_size
else:
length = self.model.config.hidden_size
if self.pooling_operation == "first_last":
length *= 2
return length
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def __getstate__(self):
# special handling for serializing transformer models
config_state_dict = self.model.config.__dict__
model_state_dict = self.model.state_dict()
# serialize the transformer models and the constructor arguments (but nothing else)
model_state = {
"config_state_dict": config_state_dict,
"model_state_dict": model_state_dict,
"embedding_length_internal": self.embedding_length,
"name": self.name,
"layer_indexes": self.layer_indexes,
"subtoken_pooling": self.pooling_operation,
"context_length": self.context_length,
"layer_mean": self.layer_mean,
"fine_tune": self.fine_tune,
"allow_long_sentences": self.allow_long_sentences,
"memory_effective_training": self.memory_effective_training,
"respect_document_boundaries": self.respect_document_boundaries,
}
return model_state
|
def __getstate__(self):
state = self.__dict__.copy()
state["tokenizer"] = None
return state
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def __setstate__(self, d):
self.__dict__ = d
# necessary for reverse compatibility with Flair <= 0.7
if "use_scalar_mix" in self.__dict__.keys():
self.__dict__["layer_mean"] = d["use_scalar_mix"]
if not "memory_effective_training" in self.__dict__.keys():
self.__dict__["memory_effective_training"] = True
if "pooling_operation" in self.__dict__.keys():
self.__dict__["subtoken_pooling"] = d["pooling_operation"]
if not "context_length" in self.__dict__.keys():
self.__dict__["context_length"] = 0
if "use_context" in self.__dict__.keys():
self.__dict__["context_length"] = (
64 if self.__dict__["use_context"] == True else 0
)
if not "respect_document_boundaries" in self.__dict__.keys():
self.__dict__["respect_document_boundaries"] = True
if not "memory_effective_training" in self.__dict__.keys():
self.__dict__["memory_effective_training"] = True
# constructor arguments
layers = ",".join([str(idx) for idx in self.__dict__["layer_indexes"]])
subtoken_pooling = self.__dict__["subtoken_pooling"]
context_length = self.__dict__["context_length"]
layer_mean = self.__dict__["layer_mean"]
fine_tune = self.__dict__["fine_tune"]
allow_long_sentences = self.__dict__["allow_long_sentences"]
respect_document_boundaries = self.__dict__["respect_document_boundaries"]
memory_effective_training = self.__dict__["memory_effective_training"]
model_name = self.__dict__["name"].split("transformer-word-")[-1]
# special handling for deserializing transformer models
if "config_state_dict" in d:
# load transformer model
config_class = CONFIG_MAPPING[d["config_state_dict"]["model_type"]]
loaded_config = config_class.from_dict(d["config_state_dict"])
# re-initialize transformer word embeddings with constructor arguments
embedding = TransformerWordEmbeddings(
model_name,
layers=layers,
subtoken_pooling=subtoken_pooling,
use_context=context_length,
layer_mean=layer_mean,
fine_tune=fine_tune,
allow_long_sentences=allow_long_sentences,
respect_document_boundaries=respect_document_boundaries,
memory_effective_training=memory_effective_training,
config=loaded_config,
state_dict=d["model_state_dict"],
)
# I have no idea why this is necessary, but otherwise it doesn't work
for key in embedding.__dict__.keys():
self.__dict__[key] = embedding.__dict__[key]
else:
# reload tokenizer to get around serialization issues
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
except:
pass
self.tokenizer = tokenizer
|
def __setstate__(self, d):
self.__dict__ = d
# reload tokenizer to get around serialization issues
model_name = self.name.split("transformer-word-")[-1]
try:
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
except:
pass
|
https://github.com/flairNLP/flair/issues/1747
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-2f413379aedd> in <module>
6
7 # load the NER tagger
----> 8 tagger = SequenceTagger.load('best-model.pt')
9
10
c:\users\nicod\miniconda3\lib\site-packages\flair\nn.py in load(cls, model)
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
---> 88 state = torch.load(f, map_location='cpu')
89
90 model = cls._init_model_with_state_dict(state)
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
c:\users\nicod\miniconda3\lib\site-packages\torch\serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
c:\users\nicod\miniconda3\lib\site-packages\transformers\tokenization_camembert.py in __setstate__(self, d)
259 raise
260 self.sp_model = spm.SentencePieceProcessor()
--> 261 self.sp_model.Load(self.vocab_file)
262
263 def convert_tokens_to_string(self, tokens):
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in Load(self, model_file, model_proto)
365 if model_proto:
366 return self.LoadFromSerializedProto(model_proto)
--> 367 return self.LoadFromFile(model_file)
368
369
c:\users\nicod\miniconda3\lib\site-packages\sentencepiece.py in LoadFromFile(self, arg)
175
176 def LoadFromFile(self, arg):
--> 177 return _sentencepiece.SentencePieceProcessor_LoadFromFile(self, arg)
178
179 def Init(self,
OSError: Not found: "/home/ubuntu/.cache/torch/transformers/3715e3a4a2de48834619b2a6f48979e13ddff5cabfb1f3409db689f9ce3bb98f.28d30f926f545047fc59da64289371eef0fbdc0764ce9ec56f808a646fcfec59": No such file or directory Error #2
|
OSError
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
language_code = sentence.get_language_code()
supported = [
"en",
"de",
"bg",
"ca",
"hr",
"cs",
"da",
"nl",
"et",
"fi",
"fr",
"el",
"he",
"hu",
"id",
"it",
"mk",
"no",
"pl",
"pt",
"ro",
"ru",
"sk",
]
if language_code not in supported:
language_code = "en"
if language_code not in self.language_embeddings:
log.info(f"Loading up MUSE embeddings for '{language_code}'!")
# download if necessary
hu_path: str = (
"https://flair.informatik.hu-berlin.de/resources/embeddings/muse"
)
cache_dir = Path("embeddings") / "MUSE"
cached_path(
f"{hu_path}/muse.{language_code}.vec.gensim.vectors.npy",
cache_dir=cache_dir,
)
embeddings_file = cached_path(
f"{hu_path}/muse.{language_code}.vec.gensim", cache_dir=cache_dir
)
# load the model
self.language_embeddings[language_code] = gensim.models.KeyedVectors.load(
str(embeddings_file)
)
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_embedding = self.get_cached_vec(language_code=language_code, word=word)
token.set_embedding(self.name, word_embedding)
return sentences
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
language_code = sentence.get_language_code()
supported = [
"en",
"de",
"bg",
"ca",
"hr",
"cs",
"da",
"nl",
"et",
"fi",
"fr",
"el",
"he",
"hu",
"id",
"it",
"mk",
"no",
"pl",
"pt",
"ro",
"ru",
"sk",
]
if language_code not in supported:
language_code = "en"
if language_code not in self.language_embeddings:
log.info(f"Loading up MUSE embeddings for '{language_code}'!")
# download if necessary
webpath = "https://alan-nlp.s3.eu-central-1.amazonaws.com/resources/embeddings-muse"
cache_dir = Path("embeddings") / "MUSE"
cached_path(
f"{webpath}/muse.{language_code}.vec.gensim.vectors.npy",
cache_dir=cache_dir,
)
embeddings_file = cached_path(
f"{webpath}/muse.{language_code}.vec.gensim", cache_dir=cache_dir
)
# load the model
self.language_embeddings[language_code] = gensim.models.KeyedVectors.load(
str(embeddings_file)
)
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_embedding = self.get_cached_vec(language_code=language_code, word=word)
token.set_embedding(self.name, word_embedding)
return sentences
|
https://github.com/flairNLP/flair/issues/1841
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-26-1ee39d7138b3> in <module>()
----> 1 df_test['flair'] = df_test['word'].apply(lambda x: flair_lstm(x))
10 frames
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/transformers/configuration_utils.py in use_return_dict(self)
217 """
218 # If torchscript is set, force `return_dict=False` to avoid jit errors
--> 219 return self.return_dict and not self.torchscript
220
221 @property
AttributeError: 'DistilBertConfig' object has no attribute 'return_dict'
|
AttributeError
|
def _fetch_model(model_name) -> str:
model_map = {}
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["de-offensive-language"] = "/".join(
[hu_path, "de-offensive-language", "germ-eval-2018-task-1-v0.4.pt"]
)
# English sentiment models
model_map["sentiment"] = "/".join(
[hu_path, "sentiment-curated-distilbert", "sentiment-en-mix-distillbert_3.1.pt"]
)
model_map["en-sentiment"] = "/".join(
[hu_path, "sentiment-curated-distilbert", "sentiment-en-mix-distillbert_3.1.pt"]
)
model_map["sentiment-fast"] = "/".join(
[hu_path, "sentiment-curated-fasttext-rnn", "sentiment-en-mix-ft-rnn.pt"]
)
# Communicative Functions Model
model_map["communicative-functions"] = "/".join(
[hu_path, "comfunc", "communicative-functions-v0.5b.pt"]
)
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
return model_name
|
def _fetch_model(model_name) -> str:
model_map = {}
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["de-offensive-language"] = "/".join(
[hu_path, "de-offensive-language", "germ-eval-2018-task-1-v0.4.pt"]
)
# English sentiment models
model_map["sentiment"] = "/".join(
[hu_path, "sentiment-curated-distilbert", "sentiment-en-mix-distillbert.pt"]
)
model_map["en-sentiment"] = "/".join(
[hu_path, "sentiment-curated-distilbert", "sentiment-en-mix-distillbert.pt"]
)
model_map["sentiment-fast"] = "/".join(
[hu_path, "sentiment-curated-fasttext-rnn", "sentiment-en-mix-ft-rnn.pt"]
)
# Communicative Functions Model
model_map["communicative-functions"] = "/".join(
[hu_path, "comfunc", "communicative-functions-v0.5b.pt"]
)
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
return model_name
|
https://github.com/flairNLP/flair/issues/1841
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-26-1ee39d7138b3> in <module>()
----> 1 df_test['flair'] = df_test['word'].apply(lambda x: flair_lstm(x))
10 frames
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/transformers/configuration_utils.py in use_return_dict(self)
217 """
218 # If torchscript is set, force `return_dict=False` to avoid jit errors
--> 219 return self.return_dict and not self.torchscript
220
221 @property
AttributeError: 'DistilBertConfig' object has no attribute 'return_dict'
|
AttributeError
|
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
batch_size: int = 1,
use_scalar_mix: bool = False,
fine_tune: bool = False,
allow_long_sentences: bool = True,
**kwargs,
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param pooling_operation: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
:param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer
models tend to be huge.
:param use_scalar_mix: If True, uses a scalar mix of layers as embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
self.allow_long_sentences = allow_long_sentences
if allow_long_sentences:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2
else:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = 0
# model name
self.name = "transformer-word-" + str(model)
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == "all":
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[
-1
]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
# self.mix = ScalarMix(mixture_size=len(self.layer_indexes), trainable=False)
self.pooling_operation = pooling_operation
self.use_scalar_mix = use_scalar_mix
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
self.batch_size = batch_size
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
# most models have an intial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = 1
if type(self.tokenizer) == XLNetTokenizer:
self.begin_offset = 0
if type(self.tokenizer) == T5Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == GPT2Tokenizer:
self.begin_offset = 0
|
def __init__(
self,
model: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
batch_size: int = 1,
use_scalar_mix: bool = False,
fine_tune: bool = False,
allow_long_sentences: bool = False,
**kwargs,
):
"""
Bidirectional transformer embeddings of words from various transformer architectures.
:param model: name of transformer model (see https://huggingface.co/transformers/pretrained_models.html for
options)
:param layers: string indicating which layers to take for embedding (-1 is topmost layer)
:param pooling_operation: how to get from token piece embeddings to token embedding. Either take the first
subtoken ('first'), the last subtoken ('last'), both first and last ('first_last') or a mean over all ('mean')
:param batch_size: How many sentence to push through transformer at once. Set to 1 by default since transformer
models tend to be huge.
:param use_scalar_mix: If True, uses a scalar mix of layers as embedding
:param fine_tune: If True, allows transformers to be fine-tuned during training
"""
super().__init__()
# temporary fix to disable tokenizer parallelism warning
# (see https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning)
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# load tokenizer and transformer model
self.tokenizer = AutoTokenizer.from_pretrained(model, **kwargs)
config = AutoConfig.from_pretrained(model, output_hidden_states=True, **kwargs)
self.model = AutoModel.from_pretrained(model, config=config, **kwargs)
self.allow_long_sentences = allow_long_sentences
if allow_long_sentences:
self.max_subtokens_sequence_length = self.tokenizer.model_max_length
self.stride = self.tokenizer.model_max_length // 2
else:
self.max_subtokens_sequence_length = None
self.stride = 0
# model name
self.name = "transformer-word-" + str(model)
# when initializing, embeddings are in eval mode by default
self.model.eval()
self.model.to(flair.device)
# embedding parameters
if layers == "all":
# send mini-token through to check how many layers the model has
hidden_states = self.model(torch.tensor([1], device=flair.device).unsqueeze(0))[
-1
]
self.layer_indexes = [int(x) for x in range(len(hidden_states))]
else:
self.layer_indexes = [int(x) for x in layers.split(",")]
# self.mix = ScalarMix(mixture_size=len(self.layer_indexes), trainable=False)
self.pooling_operation = pooling_operation
self.use_scalar_mix = use_scalar_mix
self.fine_tune = fine_tune
self.static_embeddings = not self.fine_tune
self.batch_size = batch_size
self.special_tokens = []
# check if special tokens exist to circumvent error message
if self.tokenizer._bos_token:
self.special_tokens.append(self.tokenizer.bos_token)
if self.tokenizer._cls_token:
self.special_tokens.append(self.tokenizer.cls_token)
# most models have an intial BOS token, except for XLNet, T5 and GPT2
self.begin_offset = 1
if type(self.tokenizer) == XLNetTokenizer:
self.begin_offset = 0
if type(self.tokenizer) == T5Tokenizer:
self.begin_offset = 0
if type(self.tokenizer) == GPT2Tokenizer:
self.begin_offset = 0
|
https://github.com/flairNLP/flair/issues/1690
|
----------------------------------------------------------------------------------------------------
2020-06-12 18:44:49,152 Model training base path: "dialogpt"
2020-06-12 18:44:49,152 ----------------------------------------------------------------------------------------------------
2020-06-12 18:44:49,153 Device: cuda:0
2020-06-12 18:44:49,154 ----------------------------------------------------------------------------------------------------
2020-06-12 18:44:49,155 Embeddings storage mode: cpu
2020-06-12 18:44:49,230 ----------------------------------------------------------------------------------------------------
2020-06-12 18:49:00,669 epoch 1 - iter 76/766 - loss 1.11887558 - samples/sec: 9.67
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-e12f92b71d0b> in <module>
7 mini_batch_chunk_size=2, # set this if you get OOM errors
8 max_epochs=1, # very few epochs of fine-tuning
----> 9 embeddings_storage_mode='cpu',
10 #checkpoint=True
11 )
~/venv36/lib64/python3.6/site-packages/flair/trainers/trainer.py in train(self, base_path, learning_rate, mini_batch_size, mini_batch_chunk_size, max_epochs, scheduler, anneal_factor, patience, initial_extra_patience, min_learning_rate, train_with_dev, monitor_train, monitor_test, embeddings_storage_mode, checkpoint, save_final_model, anneal_with_restarts, anneal_with_prestarts, batch_growth_annealing, shuffle, param_selection_mode, num_workers, sampler, use_amp, amp_opt_level, eval_on_train_fraction, eval_on_train_shuffle, **kwargs)
376 if not param_selection_mode:
377 weight_extractor.extract_weights(
--> 378 self.model.state_dict(), iteration
379 )
380
~/venv36/lib64/python3.6/site-packages/flair/training_utils.py in extract_weights(self, state_dict, iteration)
262 vec = state_dict[key]
263 weights_to_watch = min(
--> 264 self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size()))
265 )
266
TypeError: reduce() of empty sequence with no initial value
|
TypeError
|
def extract_weights(self, state_dict, iteration):
for key in state_dict.keys():
vec = state_dict[key]
# print(vec)
try:
weights_to_watch = min(
self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size()))
)
except:
continue
if key not in self.weights_dict:
self._init_weights_index(key, state_dict, weights_to_watch)
for i in range(weights_to_watch):
vec = state_dict[key]
for index in self.weights_dict[key][i]:
vec = vec[index]
value = vec.item()
with open(self.weights_file, "a") as f:
f.write("{}\t{}\t{}\t{}\n".format(iteration, key, i, float(value)))
|
def extract_weights(self, state_dict, iteration):
for key in state_dict.keys():
vec = state_dict[key]
weights_to_watch = min(
self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size()))
)
if key not in self.weights_dict:
self._init_weights_index(key, state_dict, weights_to_watch)
for i in range(weights_to_watch):
vec = state_dict[key]
for index in self.weights_dict[key][i]:
vec = vec[index]
value = vec.item()
with open(self.weights_file, "a") as f:
f.write("{}\t{}\t{}\t{}\n".format(iteration, key, i, float(value)))
|
https://github.com/flairNLP/flair/issues/1690
|
----------------------------------------------------------------------------------------------------
2020-06-12 18:44:49,152 Model training base path: "dialogpt"
2020-06-12 18:44:49,152 ----------------------------------------------------------------------------------------------------
2020-06-12 18:44:49,153 Device: cuda:0
2020-06-12 18:44:49,154 ----------------------------------------------------------------------------------------------------
2020-06-12 18:44:49,155 Embeddings storage mode: cpu
2020-06-12 18:44:49,230 ----------------------------------------------------------------------------------------------------
2020-06-12 18:49:00,669 epoch 1 - iter 76/766 - loss 1.11887558 - samples/sec: 9.67
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-e12f92b71d0b> in <module>
7 mini_batch_chunk_size=2, # set this if you get OOM errors
8 max_epochs=1, # very few epochs of fine-tuning
----> 9 embeddings_storage_mode='cpu',
10 #checkpoint=True
11 )
~/venv36/lib64/python3.6/site-packages/flair/trainers/trainer.py in train(self, base_path, learning_rate, mini_batch_size, mini_batch_chunk_size, max_epochs, scheduler, anneal_factor, patience, initial_extra_patience, min_learning_rate, train_with_dev, monitor_train, monitor_test, embeddings_storage_mode, checkpoint, save_final_model, anneal_with_restarts, anneal_with_prestarts, batch_growth_annealing, shuffle, param_selection_mode, num_workers, sampler, use_amp, amp_opt_level, eval_on_train_fraction, eval_on_train_shuffle, **kwargs)
376 if not param_selection_mode:
377 weight_extractor.extract_weights(
--> 378 self.model.state_dict(), iteration
379 )
380
~/venv36/lib64/python3.6/site-packages/flair/training_utils.py in extract_weights(self, state_dict, iteration)
262 vec = state_dict[key]
263 weights_to_watch = min(
--> 264 self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size()))
265 )
266
TypeError: reduce() of empty sequence with no initial value
|
TypeError
|
def __setstate__(self, state):
self.__dict__ = state
if re.fullmatch(r"cuda:[0-9]+", str(flair.device)):
cuda_device = int(str(flair.device).split(":")[-1])
elif str(flair.device) == "cpu":
cuda_device = -1
else:
cuda_device = 0
self.ee.cuda_device = cuda_device
self.ee.elmo_bilm.to(device=flair.device)
self.ee.elmo_bilm._elmo_lstm._states = tuple(
[state.to(flair.device) for state in self.ee.elmo_bilm._elmo_lstm._states]
)
|
def __setstate__(self, state):
from bpemb.util import sentencepiece_load
model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
self.__dict__ = state
# write out the binary sentence piece model into the expected directory
self.cache_dir: Path = Path(flair.cache_root) / "embeddings"
if "spm_model_binary" in self.__dict__:
# if the model was saved as binary and it is not found on disk, write to appropriate path
if not os.path.exists(self.cache_dir / state["lang"]):
os.makedirs(self.cache_dir / state["lang"])
self.model_file = self.cache_dir / model_file
with open(self.model_file, "wb") as out:
out.write(self.__dict__["spm_model_binary"])
else:
# otherwise, use normal process and potentially trigger another download
self.model_file = self._load_file(model_file)
# once the modes if there, load it with sentence piece
state["spm"] = sentencepiece_load(self.model_file)
|
https://github.com/flairNLP/flair/issues/1753
|
RuntimeError Traceback (most recent call last)
<ipython-input-21-83d7f894d041> in <module>()
----> classifier.predict(sentence)
12 frames
/usr/local/lib/python3.6/dist-packages/flair/models/text_classification_model.py in predict(self, sentences, mini_batch_size, multi_class_prob, verbose, label_name, return_loss, embedding_storage_mode)
220 continue
221
--> 222 scores = self.forward(batch)
223
224 if return_loss:
/usr/local/lib/python3.6/dist-packages/flair/models/text_classification_model.py in forward(self, sentences)
97 def forward(self, sentences):
98
---> 99 self.document_embeddings.embed(sentences)
100
101 embedding_names = self.document_embeddings.get_names()
/usr/local/lib/python3.6/dist-packages/flair/embeddings/legacy.py in embed(self, sentences)
1423 sentences.sort(key=lambda x: len(x), reverse=True)
1424
-> 1425 self.embeddings.embed(sentences)
1426
1427 # first, sort sentences by number of tokens
/usr/local/lib/python3.6/dist-packages/flair/embeddings/token.py in embed(self, sentences, static_embeddings)
69
70 for embedding in self.embeddings:
---> 71 embedding.embed(sentences)
72
73 @property
/usr/local/lib/python3.6/dist-packages/flair/embeddings/base.py in embed(self, sentences)
59
60 if not everything_embedded or not self.static_embeddings:
---> 61 self._add_embeddings_internal(sentences)
62
63 return sentences
/usr/local/lib/python3.6/dist-packages/flair/embeddings/token.py in _add_embeddings_internal(self, sentences)
1702 sentence_words.append([token.text for token in sentence])
1703
-> 1704 embeddings = self.ee.embed_batch(sentence_words)
1705
1706 for i, sentence in enumerate(sentences):
/usr/local/lib/python3.6/dist-packages/allennlp/commands/elmo.py in embed_batch(self, batch)
253 if batch == [[]]:
254 elmo_embeddings.append(empty_embedding())
--> 255 else:
256 embeddings, mask = self.batch_to_embeddings(batch)
257 for i in range(len(batch)):
/usr/local/lib/python3.6/dist-packages/allennlp/commands/elmo.py in batch_to_embeddings(self, batch)
195 if self.cuda_device >= 0:
196 character_ids = character_ids.cuda(device=self.cuda_device)
--> 197
198 bilm_output = self.elmo_bilm(character_ids)
199 layer_activations = bilm_output['activations']
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
548 result = self._slow_forward(*input, **kwargs)
549 else:
--> 550 result = self.forward(*input, **kwargs)
551 for hook in self._forward_hooks.values():
552 hook_result = hook(self, input, result)
/usr/local/lib/python3.6/dist-packages/allennlp/modules/elmo.py in forward(self, inputs, word_inputs)
605 type_representation = token_embedding['token_embedding']
606 else:
--> 607 token_embedding = self._token_embedder(inputs)
608 mask = token_embedding['mask']
609 type_representation = token_embedding['token_embedding']
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
548 result = self._slow_forward(*input, **kwargs)
549 else:
--> 550 result = self.forward(*input, **kwargs)
551 for hook in self._forward_hooks.values():
552 hook_result = hook(self, input, result)
/usr/local/lib/python3.6/dist-packages/allennlp/modules/elmo.py in forward(self, inputs)
357 character_embedding = torch.nn.functional.embedding(
358 character_ids_with_bos_eos.view(-1, max_chars_per_token),
--> 359 self._char_embedding_weights
360 )
361
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1722 # remove once script supports set_grad_enabled
1723 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1724 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1725
1726
RuntimeError: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _th_index_select
|
RuntimeError
|
def __init__(
self,
language: str = None,
dim: int = 50,
syllables: int = 100000,
cache_dir=None,
model_file_path: Path = None,
embedding_file_path: Path = None,
**kwargs,
):
"""
Initializes BP embeddings. Constructor downloads required files if not there.
"""
if not cache_dir:
cache_dir = Path(flair.cache_root) / "embeddings"
if language:
self.name: str = f"bpe-{language}-{syllables}-{dim}"
else:
assert model_file_path is not None and embedding_file_path is not None, (
"Need to specify model_file_path and embedding_file_path if no language is given in BytePairEmbeddings(...)"
)
dim = None
self.embedder = BPEmbSerializable(
lang=language,
vs=syllables,
dim=dim,
cache_dir=cache_dir,
model_file=model_file_path,
emb_file=embedding_file_path,
**kwargs,
)
if not language:
self.name: str = f"bpe-custom-{self.embedder.vs}-{self.embedder.dim}"
self.static_embeddings = True
self.__embedding_length: int = self.embedder.emb.vector_size * 2
super().__init__()
|
def __init__(
self,
language: str = None,
dim: int = 50,
syllables: int = 100000,
cache_dir=Path(flair.cache_root) / "embeddings",
model_file_path: Path = None,
embedding_file_path: Path = None,
**kwargs,
):
"""
Initializes BP embeddings. Constructor downloads required files if not there.
"""
if language:
self.name: str = f"bpe-{language}-{syllables}-{dim}"
else:
assert model_file_path is not None and embedding_file_path is not None, (
"Need to specify model_file_path and embedding_file_path if no language is given in BytePairEmbeddings(...)"
)
dim = None
self.embedder = BPEmb(
lang=language,
vs=syllables,
dim=dim,
cache_dir=cache_dir,
model_file=model_file_path,
emb_file=embedding_file_path,
**kwargs,
)
if not language:
self.name: str = f"bpe-custom-{self.embedder.vs}-{self.embedder.dim}"
self.static_embeddings = True
self.__embedding_length: int = self.embedder.emb.vector_size * 2
super().__init__()
|
https://github.com/flairNLP/flair/issues/1483
|
2020-03-20 13:32:20,184 loading file /home/abhisaar/Downloads/final-model8.pt
downloading https://nlp.h-its.org/bpemb/en/en.wiki.bpe.vs100000.model
100%|██████████| 1987533/1987533 [00:01<00:00, 1082354.99B/s]
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1247 try:
-> 1248 self._accessor.mkdir(self, mode)
1249 except FileNotFoundError:
/usr/lib/python3.6/pathlib.py in wrapped(pathobj, *args)
386 def wrapped(pathobj, *args):
--> 387 return strfunc(str(pathobj), *args)
388 return staticmethod(wrapped)
FileNotFoundError: [Errno 2] **No such file or directory: '/home/abhissha-new/.flair/embeddings/en'**
During handling of the above exception, another exception occurred:
FileNotFoundError Traceback (most recent call last)
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1247 try:
-> 1248 self._accessor.mkdir(self, mode)
1249 except FileNotFoundError:
/usr/lib/python3.6/pathlib.py in wrapped(pathobj, *args)
386 def wrapped(pathobj, *args):
--> 387 return strfunc(str(pathobj), *args)
388 return staticmethod(wrapped)
FileNotFoundError: [Errno 2] No such file or directory: '/home/abhissha-new/.flair/embeddings'
During handling of the above exception, another exception occurred:
FileNotFoundError Traceback (most recent call last)
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1247 try:
-> 1248 self._accessor.mkdir(self, mode)
1249 except FileNotFoundError:
/usr/lib/python3.6/pathlib.py in wrapped(pathobj, *args)
386 def wrapped(pathobj, *args):
--> 387 return strfunc(str(pathobj), *args)
388 return staticmethod(wrapped)
FileNotFoundError: [Errno 2] No such file or directory: '/home/abhissha-new/.flair'
During handling of the above exception, another exception occurred:
PermissionError Traceback (most recent call last)
<ipython-input-4-b077ce389efe> in <module>()
1 from flair.models import SequenceTagger
----> 2 model = SequenceTagger.load('/home/abhisaar/Downloads/final-model8.pt')
/home/abhisaar/.local/lib/python3.6/site-packages/flair/nn.py in load(cls, model)
84 # see https://github.com/zalandoresearch/flair/issues/351
85 f = file_utils.load_big_file(str(model_file))
---> 86 state = torch.load(f, map_location=flair.device)
87
88 model = cls._init_model_with_state_dict(state)
/home/abhisaar/.local/lib/python3.6/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
527 with _open_zipfile_reader(f) as opened_zipfile:
528 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 529 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
530
531
/home/abhisaar/.local/lib/python3.6/site-packages/torch/serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
700 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
701 unpickler.persistent_load = persistent_load
--> 702 result = unpickler.load()
703
704 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
/home/abhisaar/.local/lib/python3.6/site-packages/bpemb/bpemb.py in __setstate__(self, state)
452 model_rel_path = Path(state["lang"]) / model_file.name
453 model_file = self._load_file(
--> 454 str(model_rel_path), cache_dir=state["cache_dir"])
455 state['spm'] = sentencepiece_load(model_file)
456 self.__dict__ = state
/home/abhisaar/.local/lib/python3.6/site-packages/bpemb/bpemb.py in _load_file(self, file, archive, cache_dir)
194 file_url = self.base_url + file + suffix
195 print("downloading", file_url)
--> 196 return http_get(file_url, cached_file, ignore_tardir=True)
197
198 def __repr__(self):
/home/abhisaar/.local/lib/python3.6/site-packages/bpemb/util.py in http_get(url, outfile, ignore_tardir)
43 # shutil.copyfileobj() starts at current position, so go to the start
44 temp_file.seek(0)
---> 45 outfile.parent.mkdir(exist_ok=True, parents=True)
46 if headers.get("Content-Type") == "application/x-gzip":
47 import tarfile
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1250 if not parents or self.parent == self:
1251 raise
-> 1252 self.parent.mkdir(parents=True, exist_ok=True)
1253 self.mkdir(mode, parents=False, exist_ok=exist_ok)
1254 except OSError:
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1250 if not parents or self.parent == self:
1251 raise
-> 1252 self.parent.mkdir(parents=True, exist_ok=True)
1253 self.mkdir(mode, parents=False, exist_ok=exist_ok)
1254 except OSError:
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1250 if not parents or self.parent == self:
1251 raise
-> 1252 self.parent.mkdir(parents=True, exist_ok=True)
1253 self.mkdir(mode, parents=False, exist_ok=exist_ok)
1254 except OSError:
/usr/lib/python3.6/pathlib.py in mkdir(self, mode, parents, exist_ok)
1246 self._raise_closed()
1247 try:
-> 1248 self._accessor.mkdir(self, mode)
1249 except FileNotFoundError:
1250 if not parents or self.parent == self:
/usr/lib/python3.6/pathlib.py in wrapped(pathobj, *args)
385 @functools.wraps(strfunc)
386 def wrapped(pathobj, *args):
--> 387 return strfunc(str(pathobj), *args)
388 return staticmethod(wrapped)
389
PermissionError: [Errno 13] Permission denied: '/home/abhissha-new'
|
FileNotFoundError
|
def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
"""Match subtokenization to Flair tokenization and extract embeddings from transformers for each token."""
# first, subtokenize each sentence and find out into how many subtokens each token was divided
subtokenized_sentences = []
subtokenized_sentences_token_lengths = []
sentence_parts_lengths = []
# TODO: keep for backwards compatibility, but remove in future
# some pretrained models do not have this property, applying default settings now.
# can be set manually after loading the model.
if not hasattr(self, "max_subtokens_sequence_length"):
self.max_subtokens_sequence_length = None
self.allow_long_sentences = False
self.stride = 0
non_empty_sentences = []
empty_sentences = []
for sentence in sentences:
tokenized_string = sentence.to_tokenized_string()
# method 1: subtokenize sentence
# subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)
# method 2:
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
if len(subtokenized_sentence) == 0:
empty_sentences.append(sentence)
continue
else:
non_empty_sentences.append(sentence)
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(
sentence, subtokenized_sentence
)
subtokenized_sentences_token_lengths.append(token_subtoken_lengths)
subtoken_ids_sentence = self.tokenizer.convert_tokens_to_ids(
subtokenized_sentence
)
nr_sentence_parts = 0
while subtoken_ids_sentence:
nr_sentence_parts += 1
encoded_inputs = self.tokenizer.prepare_for_model(
subtoken_ids_sentence,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
)
subtoken_ids_split_sentence = encoded_inputs["input_ids"]
subtokenized_sentences.append(
torch.tensor(subtoken_ids_split_sentence, dtype=torch.long)
)
if "overflowing_tokens" in encoded_inputs:
subtoken_ids_sentence = encoded_inputs["overflowing_tokens"]
else:
subtoken_ids_sentence = None
sentence_parts_lengths.append(nr_sentence_parts)
# empty sentences get zero embeddings
for sentence in empty_sentences:
for token in sentence:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
# only embed non-empty sentences and if there is at least one
sentences = non_empty_sentences
if len(sentences) == 0:
return
# find longest sentence in batch
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
total_sentence_parts = sum(sentence_parts_lengths)
# initialize batch tensors and mask
input_ids = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
mask = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
for s_id, sentence in enumerate(subtokenized_sentences):
sequence_length = len(sentence)
input_ids[s_id][:sequence_length] = sentence
mask[s_id][:sequence_length] = torch.ones(sequence_length)
# put encoded batch through transformer model to get all hidden states of all encoder layers
hidden_states = self.model(input_ids, attention_mask=mask)[-1]
# make the tuple a tensor; makes working with it easier.
hidden_states = torch.stack(hidden_states)
sentence_idx_offset = 0
# gradients are enabled if fine-tuning is enabled
gradient_context = (
torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
)
with gradient_context:
# iterate over all subtokenized sentences
for sentence_idx, (sentence, subtoken_lengths, nr_sentence_parts) in enumerate(
zip(sentences, subtokenized_sentences_token_lengths, sentence_parts_lengths)
):
sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
for i in range(1, nr_sentence_parts):
sentence_idx_offset += 1
remainder_sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
# remove stride_size//2 at end of sentence_hidden_state, and half at beginning of remainder,
# in order to get some context into the embeddings of these words.
# also don't include the embedding of the extra [CLS] and [SEP] tokens.
sentence_hidden_state = torch.cat(
(
sentence_hidden_state[:, : -1 - self.stride // 2, :],
remainder_sentence_hidden_state[:, 1 + self.stride // 2 :, :],
),
1,
)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(
zip(sentence, subtoken_lengths)
):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = sentence_hidden_state[layer][
subword_start_idx:subword_end_idx
]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]]
)
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(
torch.cat(all_embeddings, dim=0), dim=0
)
subtoken_embeddings.append(final_embedding)
# use scalar mix of embeddings if so selected
if self.use_scalar_mix:
sm_embeddings = torch.mean(
torch.stack(subtoken_embeddings, dim=1), dim=1
)
# sm_embeddings = self.mix(subtoken_embeddings)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
|
def _add_embeddings_to_sentences(self, sentences: List[Sentence]):
"""Match subtokenization to Flair tokenization and extract embeddings from transformers for each token."""
# first, subtokenize each sentence and find out into how many subtokens each token was divided
subtokenized_sentences = []
subtokenized_sentences_token_lengths = []
sentence_parts_lengths = []
# TODO: keep for backwards compatibility, but remove in future
# some pretrained models do not have this property, applying default settings now.
# can be set manually after loading the model.
if not hasattr(self, "max_subtokens_sequence_length"):
self.max_subtokens_sequence_length = None
self.allow_long_sentences = False
self.stride = 0
for sentence in sentences:
tokenized_string = sentence.to_tokenized_string()
# method 1: subtokenize sentence
# subtokenized_sentence = self.tokenizer.encode(tokenized_string, add_special_tokens=True)
# method 2:
# transformer specific tokenization
subtokenized_sentence = self.tokenizer.tokenize(tokenized_string)
token_subtoken_lengths = self.reconstruct_tokens_from_subtokens(
sentence, subtokenized_sentence
)
subtokenized_sentences_token_lengths.append(token_subtoken_lengths)
subtoken_ids_sentence = self.tokenizer.convert_tokens_to_ids(
subtokenized_sentence
)
nr_sentence_parts = 0
while subtoken_ids_sentence:
nr_sentence_parts += 1
encoded_inputs = self.tokenizer.prepare_for_model(
subtoken_ids_sentence,
max_length=self.max_subtokens_sequence_length,
stride=self.stride,
return_overflowing_tokens=self.allow_long_sentences,
)
subtoken_ids_split_sentence = encoded_inputs["input_ids"]
subtokenized_sentences.append(
torch.tensor(subtoken_ids_split_sentence, dtype=torch.long)
)
if "overflowing_tokens" in encoded_inputs:
subtoken_ids_sentence = encoded_inputs["overflowing_tokens"]
else:
subtoken_ids_sentence = None
sentence_parts_lengths.append(nr_sentence_parts)
# find longest sentence in batch
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
total_sentence_parts = sum(sentence_parts_lengths)
# initialize batch tensors and mask
input_ids = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
mask = torch.zeros(
[total_sentence_parts, longest_sequence_in_batch],
dtype=torch.long,
device=flair.device,
)
for s_id, sentence in enumerate(subtokenized_sentences):
sequence_length = len(sentence)
input_ids[s_id][:sequence_length] = sentence
mask[s_id][:sequence_length] = torch.ones(sequence_length)
# put encoded batch through transformer model to get all hidden states of all encoder layers
hidden_states = self.model(input_ids, attention_mask=mask)[-1]
# make the tuple a tensor; makes working with it easier.
hidden_states = torch.stack(hidden_states)
sentence_idx_offset = 0
# gradients are enabled if fine-tuning is enabled
gradient_context = (
torch.enable_grad() if (self.fine_tune and self.training) else torch.no_grad()
)
with gradient_context:
# iterate over all subtokenized sentences
for sentence_idx, (sentence, subtoken_lengths, nr_sentence_parts) in enumerate(
zip(sentences, subtokenized_sentences_token_lengths, sentence_parts_lengths)
):
sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
for i in range(1, nr_sentence_parts):
sentence_idx_offset += 1
remainder_sentence_hidden_state = hidden_states[
:, sentence_idx + sentence_idx_offset, ...
]
# remove stride_size//2 at end of sentence_hidden_state, and half at beginning of remainder,
# in order to get some context into the embeddings of these words.
# also don't include the embedding of the extra [CLS] and [SEP] tokens.
sentence_hidden_state = torch.cat(
(
sentence_hidden_state[:, : -1 - self.stride // 2, :],
remainder_sentence_hidden_state[:, 1 + self.stride // 2 :, :],
),
1,
)
subword_start_idx = self.begin_offset
# for each token, get embedding
for token_idx, (token, number_of_subtokens) in enumerate(
zip(sentence, subtoken_lengths)
):
# some tokens have no subtokens at all (if omitted by BERT tokenizer) so return zero vector
if number_of_subtokens == 0:
token.set_embedding(self.name, torch.zeros(self.embedding_length))
continue
subword_end_idx = subword_start_idx + number_of_subtokens
subtoken_embeddings: List[torch.FloatTensor] = []
# get states from all selected layers, aggregate with pooling operation
for layer in self.layer_indexes:
current_embeddings = sentence_hidden_state[layer][
subword_start_idx:subword_end_idx
]
if self.pooling_operation == "first":
final_embedding: torch.FloatTensor = current_embeddings[0]
if self.pooling_operation == "last":
final_embedding: torch.FloatTensor = current_embeddings[-1]
if self.pooling_operation == "first_last":
final_embedding: torch.Tensor = torch.cat(
[current_embeddings[0], current_embeddings[-1]]
)
if self.pooling_operation == "mean":
all_embeddings: List[torch.FloatTensor] = [
embedding.unsqueeze(0) for embedding in current_embeddings
]
final_embedding: torch.Tensor = torch.mean(
torch.cat(all_embeddings, dim=0), dim=0
)
subtoken_embeddings.append(final_embedding)
# use scalar mix of embeddings if so selected
if self.use_scalar_mix:
sm_embeddings = torch.mean(
torch.stack(subtoken_embeddings, dim=1), dim=1
)
# sm_embeddings = self.mix(subtoken_embeddings)
subtoken_embeddings = [sm_embeddings]
# set the extracted embedding for the token
token.set_embedding(self.name, torch.cat(subtoken_embeddings))
subword_start_idx += number_of_subtokens
|
https://github.com/flairNLP/flair/issues/1703
|
Traceback (most recent call last):
File "train_flair.py", line 187, in <module>
sys.exit(main(sys.argv[1:]))
File "train_flair.py", line 182, in main
trainer.train(args.outfolder, checkpoint=True, **hp)
File "/opt/conda/lib/python3.6/site-packages/flair/trainers/trainer.py", line 364, in train
loss = self.model.forward_loss(batch_step)
File "/opt/conda/lib/python3.6/site-packages/flair/models/text_classification_model.py", line 146, in forward_loss
scores = self.forward(data_points)
File "/opt/conda/lib/python3.6/site-packages/flair/models/text_classification_model.py", line 100, in forward
self.document_embeddings.embed(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/base.py", line 61, in embed
self._add_embeddings_internal(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/document.py", line 369, in _add_embeddings_internal
self.embeddings.embed(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/token.py", line 71, in embed
embedding.embed(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/base.py", line 61, in embed
self._add_embeddings_internal(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/token.py", line 892, in _add_embeddings_internal
self._add_embeddings_to_sentences(batch)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/token.py", line 965, in _add_embeddings_to_sentences
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
ValueError: max() arg is an empty sequence
|
ValueError
|
def reconstruct_tokens_from_subtokens(self, sentence, subtokens):
word_iterator = iter(sentence)
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
token_subtoken_lengths = []
reconstructed_token = ""
subtoken_count = 0
# iterate over subtokens and reconstruct tokens
for subtoken_id, subtoken in enumerate(subtokens):
# remove special markup
subtoken = self._remove_special_markup(subtoken)
# TODO check if this is necessary is this method is called before prepare_for_model
# check if reconstructed token is special begin token ([CLS] or similar)
if subtoken in self.special_tokens and subtoken_id == 0:
continue
# some BERT tokenizers somehow omit words - in such cases skip to next token
if subtoken_count == 0 and not token_text.startswith(subtoken.lower()):
while True:
token_subtoken_lengths.append(0)
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
if token_text.startswith(subtoken.lower()):
break
subtoken_count += 1
# append subtoken to reconstruct token
reconstructed_token = reconstructed_token + subtoken
# check if reconstructed token is the same as current token
if reconstructed_token.lower() == token_text:
# if so, add subtoken count
token_subtoken_lengths.append(subtoken_count)
# reset subtoken count and reconstructed token
reconstructed_token = ""
subtoken_count = 0
# break from loop if all tokens are accounted for
if len(token_subtoken_lengths) < len(sentence):
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
else:
break
# if tokens are unaccounted for
while len(token_subtoken_lengths) < len(sentence) and len(token.text) == 1:
token_subtoken_lengths.append(0)
if len(token_subtoken_lengths) == len(sentence):
break
token = next(word_iterator)
# check if all tokens were matched to subtokens
if token != sentence[-1]:
log.error(
f"Tokenization MISMATCH in sentence '{sentence.to_tokenized_string()}'"
)
log.error(f"Last matched: '{token}'")
log.error(f"Last sentence: '{sentence[-1]}'")
log.error(f"subtokenized: '{subtokens}'")
return token_subtoken_lengths
|
def reconstruct_tokens_from_subtokens(self, sentence, subtokens):
word_iterator = iter(sentence)
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
token_subtoken_lengths = []
reconstructed_token = ""
subtoken_count = 0
# iterate over subtokens and reconstruct tokens
for subtoken_id, subtoken in enumerate(subtokens):
# remove special markup
subtoken = self._remove_special_markup(subtoken)
# TODO check if this is necessary is this method is called before prepare_for_model
# check if reconstructed token is special begin token ([CLS] or similar)
if subtoken in self.special_tokens and subtoken_id == 0:
continue
# some BERT tokenizers somehow omit words - in such cases skip to next token
if subtoken_count == 0 and not token_text.startswith(subtoken.lower()):
token_subtoken_lengths.append(0)
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
subtoken_count += 1
# append subtoken to reconstruct token
reconstructed_token = reconstructed_token + subtoken
# check if reconstructed token is the same as current token
if reconstructed_token.lower() == token_text:
# if so, add subtoken count
token_subtoken_lengths.append(subtoken_count)
# reset subtoken count and reconstructed token
reconstructed_token = ""
subtoken_count = 0
# break from loop if all tokens are accounted for
if len(token_subtoken_lengths) < len(sentence):
token = next(word_iterator)
token_text = self._get_processed_token_text(token)
else:
break
# check if all tokens were matched to subtokens
if token != sentence[-1]:
log.error(
f"Tokenization MISMATCH in sentence '{sentence.to_tokenized_string()}'"
)
log.error(f"Last matched: '{token}'")
log.error(f"Last sentence: '{sentence[-1]}'")
log.error(f"subtokenized: '{subtokens}'")
return token_subtoken_lengths
|
https://github.com/flairNLP/flair/issues/1703
|
Traceback (most recent call last):
File "train_flair.py", line 187, in <module>
sys.exit(main(sys.argv[1:]))
File "train_flair.py", line 182, in main
trainer.train(args.outfolder, checkpoint=True, **hp)
File "/opt/conda/lib/python3.6/site-packages/flair/trainers/trainer.py", line 364, in train
loss = self.model.forward_loss(batch_step)
File "/opt/conda/lib/python3.6/site-packages/flair/models/text_classification_model.py", line 146, in forward_loss
scores = self.forward(data_points)
File "/opt/conda/lib/python3.6/site-packages/flair/models/text_classification_model.py", line 100, in forward
self.document_embeddings.embed(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/base.py", line 61, in embed
self._add_embeddings_internal(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/document.py", line 369, in _add_embeddings_internal
self.embeddings.embed(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/token.py", line 71, in embed
embedding.embed(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/base.py", line 61, in embed
self._add_embeddings_internal(sentences)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/token.py", line 892, in _add_embeddings_internal
self._add_embeddings_to_sentences(batch)
File "/opt/conda/lib/python3.6/site-packages/flair/embeddings/token.py", line 965, in _add_embeddings_to_sentences
longest_sequence_in_batch: int = len(max(subtokenized_sentences, key=len))
ValueError: max() arg is an empty sequence
|
ValueError
|
def _fetch_model(model_name) -> str:
model_map = {}
aws_resource_path_v04 = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4"
)
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["ner"] = "/".join(
[aws_resource_path_v04, "NER-conll03-english", "en-ner-conll03-v0.4.pt"]
)
model_map["ner-fast"] = "/".join(
[
aws_resource_path_v04,
"NER-conll03--h256-l1-b32-p3-0.5-%2Bglove%2Bnews-forward-fast%2Bnews-backward-fast-normal-locked0.5-word0.05--release_4",
"en-ner-fast-conll03-v0.4.pt",
]
)
model_map["ner-ontonotes"] = "/".join(
[
aws_resource_path_v04,
"release-ner-ontonotes-0",
"en-ner-ontonotes-v0.4.pt",
]
)
model_map["ner-ontonotes-fast"] = "/".join(
[
aws_resource_path_v04,
"release-ner-ontonotes-fast-0",
"en-ner-ontonotes-fast-v0.4.pt",
]
)
for key in ["ner-multi", "multi-ner"]:
model_map[key] = "/".join(
[
aws_resource_path_v04,
"release-quadner-512-l2-multi-embed",
"quadner-large.pt",
]
)
for key in ["ner-multi-fast", "multi-ner-fast"]:
model_map[key] = "/".join(
[aws_resource_path_v04, "NER-multi-fast", "ner-multi-fast.pt"]
)
for key in ["ner-multi-fast-learn", "multi-ner-fast-learn"]:
model_map[key] = "/".join(
[
aws_resource_path_v04,
"NER-multi-fast-evolve",
"ner-multi-fast-learn.pt",
]
)
model_map["upos"] = "/".join(
[
aws_resource_path_v04,
"POS-ontonotes--h256-l1-b32-p3-0.5-%2Bglove%2Bnews-forward%2Bnews-backward-normal-locked0.5-word0.05--v0.4_0",
"en-pos-ontonotes-v0.4.pt",
]
)
model_map["pos"] = "/".join(
[
hu_path,
"release-pos-0",
"en-pos-ontonotes-v0.5.pt",
]
)
model_map["upos-fast"] = "/".join(
[
aws_resource_path_v04,
"release-pos-fast-0",
"en-pos-ontonotes-fast-v0.4.pt",
]
)
model_map["pos-fast"] = "/".join(
[
hu_path,
"release-pos-fast-0",
"en-pos-ontonotes-fast-v0.5.pt",
]
)
for key in ["pos-multi", "multi-pos"]:
model_map[key] = "/".join(
[
aws_resource_path_v04,
"release-dodekapos-512-l2-multi",
"pos-multi-v0.1.pt",
]
)
for key in ["pos-multi-fast", "multi-pos-fast"]:
model_map[key] = "/".join(
[aws_resource_path_v04, "UPOS-multi-fast", "pos-multi-fast.pt"]
)
model_map["frame"] = "/".join(
[aws_resource_path_v04, "release-frame-1", "en-frame-ontonotes-v0.4.pt"]
)
model_map["frame-fast"] = "/".join(
[
aws_resource_path_v04,
"release-frame-fast-0",
"en-frame-ontonotes-fast-v0.4.pt",
]
)
model_map["chunk"] = "/".join(
[
aws_resource_path_v04,
"NP-conll2000--h256-l1-b32-p3-0.5-%2Bnews-forward%2Bnews-backward-normal-locked0.5-word0.05--v0.4_0",
"en-chunk-conll2000-v0.4.pt",
]
)
model_map["chunk-fast"] = "/".join(
[
aws_resource_path_v04,
"release-chunk-fast-0",
"en-chunk-conll2000-fast-v0.4.pt",
]
)
model_map["da-pos"] = "/".join(
[aws_resource_path_v04, "POS-danish", "da-pos-v0.1.pt"]
)
model_map["da-ner"] = "/".join(
[aws_resource_path_v04, "NER-danish", "da-ner-v0.1.pt"]
)
model_map["de-pos"] = "/".join(
[hu_path, "release-de-pos-0", "de-pos-ud-hdt-v0.5.pt"]
)
model_map["de-pos-tweets"] = "/".join(
[
aws_resource_path_v04,
"POS-fine-grained-german-tweets",
"de-pos-twitter-v0.1.pt",
]
)
model_map["de-ner"] = "/".join(
[aws_resource_path_v04, "release-de-ner-0", "de-ner-conll03-v0.4.pt"]
)
model_map["de-ner-germeval"] = "/".join(
[aws_resource_path_v04, "NER-germeval", "de-ner-germeval-0.4.1.pt"]
)
model_map["fr-ner"] = "/".join(
[aws_resource_path_v04, "release-fr-ner-0", "fr-ner-wikiner-0.4.pt"]
)
model_map["nl-ner"] = "/".join(
[hu_path, "dutch-ner_0", "nl-ner-bert-conll02-v0.5.pt"]
)
model_map["nl-ner-rnn"] = "/".join(
[hu_path, "dutch-ner-flair-0", "nl-ner-conll02-v0.5.pt"]
)
model_map["ml-pos"] = (
"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-xpos-model.pt"
)
model_map["ml-upos"] = (
"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-upos-model.pt"
)
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
# the historical German taggers by the @redewiegergabe project
if model_name == "de-historic-indirect":
model_file = Path(flair.cache_root) / cache_dir / "indirect" / "final-model.pt"
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/indirect.zip", cache_dir=cache_dir
)
unzip_file(
Path(flair.cache_root) / cache_dir / "indirect.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "indirect" / "final-model.pt"
)
if model_name == "de-historic-direct":
model_file = Path(flair.cache_root) / cache_dir / "direct" / "final-model.pt"
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/direct.zip", cache_dir=cache_dir
)
unzip_file(
Path(flair.cache_root) / cache_dir / "direct.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "direct" / "final-model.pt"
)
if model_name == "de-historic-reported":
model_file = Path(flair.cache_root) / cache_dir / "reported" / "final-model.pt"
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/reported.zip", cache_dir=cache_dir
)
unzip_file(
Path(flair.cache_root) / cache_dir / "reported.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "reported" / "final-model.pt"
)
if model_name == "de-historic-free-indirect":
model_file = (
Path(flair.cache_root) / cache_dir / "freeIndirect" / "final-model.pt"
)
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/freeIndirect.zip",
cache_dir=cache_dir,
)
unzip_file(
Path(flair.cache_root) / cache_dir / "freeIndirect.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "freeIndirect" / "final-model.pt"
)
return model_name
|
def _fetch_model(model_name) -> str:
model_map = {}
aws_resource_path_v04 = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4"
)
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["ner"] = "/".join(
[aws_resource_path_v04, "NER-conll03-english", "en-ner-conll03-v0.4.pt"]
)
model_map["ner-fast"] = "/".join(
[
aws_resource_path_v04,
"NER-conll03--h256-l1-b32-p3-0.5-%2Bglove%2Bnews-forward-fast%2Bnews-backward-fast-normal-locked0.5-word0.05--release_4",
"en-ner-fast-conll03-v0.4.pt",
]
)
model_map["ner-ontonotes"] = "/".join(
[
aws_resource_path_v04,
"release-ner-ontonotes-0",
"en-ner-ontonotes-v0.4.pt",
]
)
model_map["ner-ontonotes-fast"] = "/".join(
[
aws_resource_path_v04,
"release-ner-ontonotes-fast-0",
"en-ner-ontonotes-fast-v0.4.pt",
]
)
for key in ["ner-multi", "multi-ner"]:
model_map[key] = "/".join(
[
aws_resource_path_v04,
"release-quadner-512-l2-multi-embed",
"quadner-large.pt",
]
)
for key in ["ner-multi-fast", "multi-ner-fast"]:
model_map[key] = "/".join(
[aws_resource_path_v04, "NER-multi-fast", "ner-multi-fast.pt"]
)
for key in ["ner-multi-fast-learn", "multi-ner-fast-learn"]:
model_map[key] = "/".join(
[
aws_resource_path_v04,
"NER-multi-fast-evolve",
"ner-multi-fast-learn.pt",
]
)
model_map["upos"] = "/".join(
[
aws_resource_path_v04,
"POS-ontonotes--h256-l1-b32-p3-0.5-%2Bglove%2Bnews-forward%2Bnews-backward-normal-locked0.5-word0.05--v0.4_0",
"en-pos-ontonotes-v0.4.pt",
]
)
model_map["pos"] = "/".join(
[
hu_path,
"release-pos-0",
"en-pos-ontonotes-v0.5.pt",
]
)
model_map["upos-fast"] = "/".join(
[
aws_resource_path_v04,
"release-pos-fast-0",
"en-pos-ontonotes-fast-v0.4.pt",
]
)
model_map["pos-fast"] = "/".join(
[
hu_path,
"release-pos-fast-0",
"en-pos-ontonotes-fast-v0.5.pt",
]
)
for key in ["pos-multi", "multi-pos"]:
model_map[key] = "/".join(
[
aws_resource_path_v04,
"release-dodekapos-512-l2-multi",
"pos-multi-v0.1.pt",
]
)
for key in ["pos-multi-fast", "multi-pos-fast"]:
model_map[key] = "/".join(
[aws_resource_path_v04, "UPOS-multi-fast", "pos-multi-fast.pt"]
)
model_map["frame"] = "/".join(
[aws_resource_path_v04, "release-frame-1", "en-frame-ontonotes-v0.4.pt"]
)
model_map["frame-fast"] = "/".join(
[
aws_resource_path_v04,
"release-frame-fast-0",
"en-frame-ontonotes-fast-v0.4.pt",
]
)
model_map["chunk"] = "/".join(
[
aws_resource_path_v04,
"NP-conll2000--h256-l1-b32-p3-0.5-%2Bnews-forward%2Bnews-backward-normal-locked0.5-word0.05--v0.4_0",
"en-chunk-conll2000-v0.4.pt",
]
)
model_map["chunk-fast"] = "/".join(
[
aws_resource_path_v04,
"release-chunk-fast-0",
"en-chunk-conll2000-fast-v0.4.pt",
]
)
model_map["da-pos"] = "/".join(
[aws_resource_path_v04, "POS-danish", "da-pos-v0.1.pt"]
)
model_map["da-ner"] = "/".join(
[aws_resource_path_v04, "NER-danish", "da-ner-v0.1.pt"]
)
model_map["de-pos"] = "/".join(
[hu_path, "release-de-pos-0", "de-pos-ud-hdt-v0.5.pt"]
)
model_map["de-pos-tweets"] = "/".join(
[
aws_resource_path_v04,
"POS-fine-grained-german-tweets",
"de-pos-twitter-v0.1.pt",
]
)
model_map["de-ner"] = "/".join(
[aws_resource_path_v04, "release-de-ner-0", "de-ner-conll03-v0.4.pt"]
)
model_map["de-ner-germeval"] = "/".join(
[aws_resource_path_v04, "NER-germeval", "de-ner-germeval-0.4.1.pt"]
)
model_map["fr-ner"] = "/".join(
[aws_resource_path_v04, "release-fr-ner-0", "fr-ner-wikiner-0.4.pt"]
)
model_map["nl-ner"] = "/".join(
[aws_resource_path_v04, "NER-conll2002-dutch", "nl-ner-conll02-v0.1.pt"]
)
model_map["ml-pos"] = (
"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-xpos-model.pt"
)
model_map["ml-upos"] = (
"https://raw.githubusercontent.com/qburst/models-repository/master/FlairMalayalamModels/malayalam-upos-model.pt"
)
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
# the historical German taggers by the @redewiegergabe project
if model_name == "de-historic-indirect":
model_file = Path(flair.cache_root) / cache_dir / "indirect" / "final-model.pt"
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/indirect.zip", cache_dir=cache_dir
)
unzip_file(
Path(flair.cache_root) / cache_dir / "indirect.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "indirect" / "final-model.pt"
)
if model_name == "de-historic-direct":
model_file = Path(flair.cache_root) / cache_dir / "direct" / "final-model.pt"
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/direct.zip", cache_dir=cache_dir
)
unzip_file(
Path(flair.cache_root) / cache_dir / "direct.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "direct" / "final-model.pt"
)
if model_name == "de-historic-reported":
model_file = Path(flair.cache_root) / cache_dir / "reported" / "final-model.pt"
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/reported.zip", cache_dir=cache_dir
)
unzip_file(
Path(flair.cache_root) / cache_dir / "reported.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "reported" / "final-model.pt"
)
if model_name == "de-historic-free-indirect":
model_file = (
Path(flair.cache_root) / cache_dir / "freeIndirect" / "final-model.pt"
)
if not model_file.exists():
cached_path(
"http://www.redewiedergabe.de/models/freeIndirect.zip",
cache_dir=cache_dir,
)
unzip_file(
Path(flair.cache_root) / cache_dir / "freeIndirect.zip",
Path(flair.cache_root) / cache_dir,
)
model_name = str(
Path(flair.cache_root) / cache_dir / "freeIndirect" / "final-model.pt"
)
return model_name
|
https://github.com/flairNLP/flair/issues/1365
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-8-38968e7669bc> in <module>
5
6 # predict NER tags
----> 7 tagger.predict(sentence)
8
9 # print sentence with predicted tags
~/code/flair/flair/models/sequence_tagger_model.py in predict(self, sentences, mini_batch_size, embedding_storage_mode, all_tag_prob, verbose, use_tokenizer)
331 continue
332
--> 333 feature: torch.Tensor = self.forward(batch)
334 tags, all_tags = self._obtain_labels(
335 feature=feature,
~/code/flair/flair/models/sequence_tagger_model.py in forward(self, sentences)
471 def forward(self, sentences: List[Sentence]):
472
--> 473 self.embeddings.embed(sentences)
474
475 lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
~/code/flair/flair/embeddings.py in embed(self, sentences, static_embeddings)
175
176 for embedding in self.embeddings:
--> 177 embedding.embed(sentences)
178
179 @property
~/code/flair/flair/embeddings.py in embed(self, sentences)
94
95 if not everything_embedded or not self.static_embeddings:
---> 96 self._add_embeddings_internal(sentences)
97
98 return sentences
~/code/flair/flair/embeddings.py in _add_embeddings_internal(self, sentences)
2638 # get hidden states from language model
2639 all_hidden_states_in_lm = self.lm.get_representation(
-> 2640 text_sentences, start_marker, end_marker, self.chars_per_chunk
2641 )
2642
~/anaconda3/envs/flair/lib/python3.7/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
574 return modules[name]
575 raise AttributeError("'{}' object has no attribute '{}'".format(
--> 576 type(self).__name__, name))
577
578 def __setattr__(self, name, value):
AttributeError: 'CharLMEmbeddings' object has no attribute 'chars_per_chunk'
|
AttributeError
|
def __init__(
self,
hidden_size: int,
embeddings: TokenEmbeddings,
tag_dictionary: Dictionary,
tag_type: str,
use_crf: bool = True,
use_rnn: bool = True,
rnn_layers: int = 1,
dropout: float = 0.0,
word_dropout: float = 0.05,
locked_dropout: float = 0.5,
train_initial_hidden_state: bool = False,
rnn_type: str = "LSTM",
pickle_module: str = "pickle",
beta: float = 1.0,
loss_weights: Dict[str, float] = None,
):
"""
Initializes a SequenceTagger
:param hidden_size: number of hidden states in RNN
:param embeddings: word embeddings used in tagger
:param tag_dictionary: dictionary of tags you want to predict
:param tag_type: string identifier for tag type
:param use_crf: if True use CRF decoder, else project directly to tag space
:param use_rnn: if True use RNN layer, otherwise use word embeddings directly
:param rnn_layers: number of RNN layers
:param dropout: dropout probability
:param word_dropout: word dropout probability
:param locked_dropout: locked dropout probability
:param train_initial_hidden_state: if True, trains initial hidden state of RNN
:param beta: Parameter for F-beta score for evaluation and training annealing
:param loss_weights: Dictionary of weights for classes (tags) for the loss function
(if any tag's weight is unspecified it will default to 1.0)
"""
super(SequenceTagger, self).__init__()
self.use_rnn = use_rnn
self.hidden_size = hidden_size
self.use_crf: bool = use_crf
self.rnn_layers: int = rnn_layers
self.trained_epochs: int = 0
self.embeddings = embeddings
# set the dictionaries
self.tag_dictionary: Dictionary = tag_dictionary
# if we use a CRF, we must add special START and STOP tags to the dictionary
if use_crf:
self.tag_dictionary.add_item(START_TAG)
self.tag_dictionary.add_item(STOP_TAG)
self.tag_type: str = tag_type
self.tagset_size: int = len(tag_dictionary)
self.beta = beta
self.weight_dict = loss_weights
# Initialize the weight tensor
if loss_weights is not None:
n_classes = len(self.tag_dictionary)
weight_list = [1.0 for i in range(n_classes)]
for i, tag in enumerate(self.tag_dictionary.get_items()):
if tag in loss_weights.keys():
weight_list[i] = loss_weights[tag]
self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)
else:
self.loss_weights = None
# initialize the network architecture
self.nlayers: int = rnn_layers
self.hidden_word = None
# dropouts
self.use_dropout: float = dropout
self.use_word_dropout: float = word_dropout
self.use_locked_dropout: float = locked_dropout
self.pickle_module = pickle_module
if dropout > 0.0:
self.dropout = torch.nn.Dropout(dropout)
if word_dropout > 0.0:
self.word_dropout = flair.nn.WordDropout(word_dropout)
if locked_dropout > 0.0:
self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
rnn_input_dim: int = self.embeddings.embedding_length
self.relearn_embeddings: bool = True
if self.relearn_embeddings:
self.embedding2nn = torch.nn.Linear(rnn_input_dim, rnn_input_dim)
self.train_initial_hidden_state = train_initial_hidden_state
self.bidirectional = True
self.rnn_type = rnn_type
# bidirectional LSTM on top of embedding layer
if self.use_rnn:
num_directions = 2 if self.bidirectional else 1
if self.rnn_type in ["LSTM", "GRU"]:
self.rnn = getattr(torch.nn, self.rnn_type)(
rnn_input_dim,
hidden_size,
num_layers=self.nlayers,
dropout=0.0 if self.nlayers == 1 else 0.5,
bidirectional=True,
batch_first=True,
)
# Create initial hidden state and initialize it
if self.train_initial_hidden_state:
self.hs_initializer = torch.nn.init.xavier_normal_
self.lstm_init_h = Parameter(
torch.randn(self.nlayers * num_directions, self.hidden_size),
requires_grad=True,
)
self.lstm_init_c = Parameter(
torch.randn(self.nlayers * num_directions, self.hidden_size),
requires_grad=True,
)
# TODO: Decide how to initialize the hidden state variables
# self.hs_initializer(self.lstm_init_h)
# self.hs_initializer(self.lstm_init_c)
# final linear map to tag space
self.linear = torch.nn.Linear(hidden_size * num_directions, len(tag_dictionary))
else:
self.linear = torch.nn.Linear(
self.embeddings.embedding_length, len(tag_dictionary)
)
if self.use_crf:
self.transitions = torch.nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size)
)
self.transitions.detach()[
self.tag_dictionary.get_idx_for_item(START_TAG), :
] = -10000
self.transitions.detach()[
:, self.tag_dictionary.get_idx_for_item(STOP_TAG)
] = -10000
self.to(flair.device)
|
def __init__(
self,
hidden_size: int,
embeddings: TokenEmbeddings,
tag_dictionary: Dictionary,
tag_type: str,
use_crf: bool = True,
use_rnn: bool = True,
rnn_layers: int = 1,
dropout: float = 0.0,
word_dropout: float = 0.05,
locked_dropout: float = 0.5,
train_initial_hidden_state: bool = False,
rnn_type: str = "LSTM",
pickle_module: str = "pickle",
beta: float = 1.0,
loss_weights: Dict[str, float] = None,
):
"""
Initializes a SequenceTagger
:param hidden_size: number of hidden states in RNN
:param embeddings: word embeddings used in tagger
:param tag_dictionary: dictionary of tags you want to predict
:param tag_type: string identifier for tag type
:param use_crf: if True use CRF decoder, else project directly to tag space
:param use_rnn: if True use RNN layer, otherwise use word embeddings directly
:param rnn_layers: number of RNN layers
:param dropout: dropout probability
:param word_dropout: word dropout probability
:param locked_dropout: locked dropout probability
:param train_initial_hidden_state: if True, trains initial hidden state of RNN
:param beta: Parameter for F-beta score for evaluation and training annealing
:param loss_weights: Dictionary of weights for classes (tags) for the loss function
(if any tag's weight is unspecified it will default to 1.0)
"""
super(SequenceTagger, self).__init__()
self.use_rnn = use_rnn
self.hidden_size = hidden_size
self.use_crf: bool = use_crf
self.rnn_layers: int = rnn_layers
self.trained_epochs: int = 0
self.embeddings = embeddings
# set the dictionaries
self.tag_dictionary: Dictionary = tag_dictionary
self.tag_type: str = tag_type
self.tagset_size: int = len(tag_dictionary)
self.beta = beta
self.weight_dict = loss_weights
# Initialize the weight tensor
if loss_weights is not None:
n_classes = len(self.tag_dictionary)
weight_list = [1.0 for i in range(n_classes)]
for i, tag in enumerate(self.tag_dictionary.get_items()):
if tag in loss_weights.keys():
weight_list[i] = loss_weights[tag]
self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)
else:
self.loss_weights = None
# initialize the network architecture
self.nlayers: int = rnn_layers
self.hidden_word = None
# dropouts
self.use_dropout: float = dropout
self.use_word_dropout: float = word_dropout
self.use_locked_dropout: float = locked_dropout
self.pickle_module = pickle_module
if dropout > 0.0:
self.dropout = torch.nn.Dropout(dropout)
if word_dropout > 0.0:
self.word_dropout = flair.nn.WordDropout(word_dropout)
if locked_dropout > 0.0:
self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
rnn_input_dim: int = self.embeddings.embedding_length
self.relearn_embeddings: bool = True
if self.relearn_embeddings:
self.embedding2nn = torch.nn.Linear(rnn_input_dim, rnn_input_dim)
self.train_initial_hidden_state = train_initial_hidden_state
self.bidirectional = True
self.rnn_type = rnn_type
# bidirectional LSTM on top of embedding layer
if self.use_rnn:
num_directions = 2 if self.bidirectional else 1
if self.rnn_type in ["LSTM", "GRU"]:
self.rnn = getattr(torch.nn, self.rnn_type)(
rnn_input_dim,
hidden_size,
num_layers=self.nlayers,
dropout=0.0 if self.nlayers == 1 else 0.5,
bidirectional=True,
batch_first=True,
)
# Create initial hidden state and initialize it
if self.train_initial_hidden_state:
self.hs_initializer = torch.nn.init.xavier_normal_
self.lstm_init_h = Parameter(
torch.randn(self.nlayers * num_directions, self.hidden_size),
requires_grad=True,
)
self.lstm_init_c = Parameter(
torch.randn(self.nlayers * num_directions, self.hidden_size),
requires_grad=True,
)
# TODO: Decide how to initialize the hidden state variables
# self.hs_initializer(self.lstm_init_h)
# self.hs_initializer(self.lstm_init_c)
# final linear map to tag space
self.linear = torch.nn.Linear(hidden_size * num_directions, len(tag_dictionary))
else:
self.linear = torch.nn.Linear(
self.embeddings.embedding_length, len(tag_dictionary)
)
if self.use_crf:
self.transitions = torch.nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size)
)
self.transitions.detach()[
self.tag_dictionary.get_idx_for_item(START_TAG), :
] = -10000
self.transitions.detach()[
:, self.tag_dictionary.get_idx_for_item(STOP_TAG)
] = -10000
self.to(flair.device)
|
https://github.com/flairNLP/flair/issues/1562
|
...
2020-04-30 14:36:40,370 epoch 1 - iter 96/120 - loss 209973.64290365 - samples/sec: 33.04
2020-04-30 14:36:52,323 epoch 1 - iter 108/120 - loss 208670.76244213 - samples/sec: 32.17
2020-04-30 14:37:03,579 epoch 1 - iter 120/120 - loss 206854.72747396 - samples/sec: 34.17
2020-04-30 14:37:03,594 ----------------------------------------------------------------------------------------------------
2020-04-30 14:37:03,594 EPOCH 1 done: loss 206854.7275 - lr 0.1000000
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-18-67e2f7ae9c06> in <module>()
15 trainer.train('./resource/taggers/swahili-dumb-ner',
16 learning_rate=0.1,
---> 17 mini_batch_size=32)
18
19 # Plotting the performance graphs
3 frames
/usr/local/lib/python3.6/dist-packages/flair/models/sequence_tagger_model.py in _viterbi_decode(self, feats, transitions, all_scores)
754
755 start = best_path.pop()
--> 756 assert start == id_start
757 best_path.reverse()
758
AssertionError:
|
AssertionError
|
def _add_embeddings_internal(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
# TODO: remove in future versions
if not hasattr(self, "locked_dropout"):
self.locked_dropout = None
if not hasattr(self, "word_dropout"):
self.word_dropout = None
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
# embed words in the sentence
self.embeddings.embed(sentences)
lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
longest_token_sequence_in_batch: int = max(lengths)
pre_allocated_zero_tensor = torch.zeros(
self.embeddings.embedding_length * longest_token_sequence_in_batch,
dtype=torch.float,
device=flair.device,
)
all_embs: List[torch.Tensor] = list()
for sentence in sentences:
all_embs += [emb for token in sentence for emb in token.get_each_embedding()]
nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
if nb_padding_tokens > 0:
t = pre_allocated_zero_tensor[
: self.embeddings.embedding_length * nb_padding_tokens
]
all_embs.append(t)
sentence_tensor = torch.cat(all_embs).view(
[
len(sentences),
longest_token_sequence_in_batch,
self.embeddings.embedding_length,
]
)
# before-RNN dropout
if self.dropout:
sentence_tensor = self.dropout(sentence_tensor)
if self.locked_dropout:
sentence_tensor = self.locked_dropout(sentence_tensor)
if self.word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
# reproject if set
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
# push through RNN
packed = pack_padded_sequence(
sentence_tensor, lengths, enforce_sorted=False, batch_first=True
)
rnn_out, hidden = self.rnn(packed)
outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)
# after-RNN dropout
if self.dropout:
outputs = self.dropout(outputs)
if self.locked_dropout:
outputs = self.locked_dropout(outputs)
# extract embeddings from RNN
for sentence_no, length in enumerate(lengths):
last_rep = outputs[sentence_no, length - 1]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[sentence_no, 0]
embedding = torch.cat([first_rep, last_rep], 0)
if self.static_embeddings:
embedding = embedding.detach()
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
|
def _add_embeddings_internal(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
# TODO: remove in future versions
if not hasattr(self, "locked_dropout"):
self.locked_dropout = None
if not hasattr(self, "word_dropout"):
self.word_dropout = None
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
# embed words in the sentence
self.embeddings.embed(sentences)
lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
longest_token_sequence_in_batch: int = max(lengths)
# initialize zero-padded word embeddings tensor
# sentence_tensor = torch.zeros(
# [
# len(sentences),
# longest_token_sequence_in_batch,
# self.embeddings.embedding_length,
# ],
# dtype=torch.float,
# device=flair.device,
# )
#
# for s_id, sentence in enumerate(sentences):
# # fill values with word embeddings
# all_embs = list()
#
# for index_token, token in enumerate(sentence):
# embs = token.get_each_embedding()
# if not all_embs:
# all_embs = [list() for _ in range(len(embs))]
# for index_emb, emb in enumerate(embs):
# all_embs[index_emb].append(emb)
#
# concat_word_emb = [torch.stack(embs) for embs in all_embs]
# concat_sentence_emb = torch.cat(concat_word_emb, dim=1)
# sentence_tensor[s_id][: len(sentence)] = concat_sentence_emb
pre_allocated_zero_tensor = torch.zeros(
self.embeddings.embedding_length * longest_token_sequence_in_batch,
dtype=torch.float,
device=flair.device,
)
all_embs: List[torch.Tensor] = list()
for sentence in sentences:
all_embs += [emb for token in sentence for emb in token.get_each_embedding()]
nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
if nb_padding_tokens > 0:
t = pre_allocated_zero_tensor[
: self.embeddings.embedding_length * nb_padding_tokens
]
all_embs.append(t)
sentence_tensor = torch.cat(all_embs).view(
[
len(sentences),
longest_token_sequence_in_batch,
self.embeddings.embedding_length,
]
)
# before-RNN dropout
if self.dropout:
sentence_tensor = self.dropout(sentence_tensor)
if self.locked_dropout:
sentence_tensor = self.locked_dropout(sentence_tensor)
if self.word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
# reproject if set
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
# push through RNN
packed = pack_padded_sequence(
sentence_tensor, lengths, enforce_sorted=False, batch_first=True
)
rnn_out, hidden = self.rnn(packed)
outputs, output_lengths = pad_packed_sequence(rnn_out, batch_first=True)
# after-RNN dropout
if self.dropout:
outputs = self.dropout(outputs)
if self.locked_dropout:
outputs = self.locked_dropout(outputs)
# extract embeddings from RNN
for sentence_no, length in enumerate(lengths):
last_rep = outputs[sentence_no, length - 1]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[sentence_no, 0]
embedding = torch.cat([first_rep, last_rep], 0)
if self.static_embeddings:
embedding = embedding.detach()
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
|
https://github.com/flairNLP/flair/issues/1377
|
AttributeError Traceback (most recent call last)
<ipython-input-4-f517a501f1cf> in <module>
----> 1 classifier = TextClassifier.load('en-sentiment')
~/Desktop/flair/flair/nn.py in load(cls, model)
86 state = torch.load(f, map_location=flair.device)
87
---> 88 model = cls._init_model_with_state_dict(state)
89
90 model.eval()
~/Desktop/flair/flair/models/text_classification_model.py in _init_model_with_state_dict(state)
129 multi_label=state["multi_label"],
130 beta=beta,
--> 131 loss_weights=weights,
132 )
133
~/Desktop/flair/flair/models/text_classification_model.py in __init__(self, document_embeddings, label_dictionary, multi_label, multi_label_threshold, beta, loss_weights)
90
91 # auto-spawn on GPU if available
---> 92 self.to(flair.device)
93
94 def _init_weights(self):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in to(self, *args, **kwargs)
423 return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
424
--> 425 return self._apply(convert)
426
427 def register_backward_hook(self, hook):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
199 def _apply(self, fn):
200 for module in self.children():
--> 201 module._apply(fn)
202
203 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in _apply(self, fn)
199 def _apply(self, fn):
200 for module in self.children():
--> 201 module._apply(fn)
202
203 def compute_should_use_set_data(tensor, tensor_applied):
~/.local/lib/python3.6/site-packages/torch/nn/modules/rnn.py in _apply(self, fn)
135 # Note: be v. careful before removing this, as 3rd party device types
136 # likely rely on this behavior to properly .to() modules like LSTM.
--> 137 self._flat_weights = [getattr(self, weight) for weight in self._flat_weights_names]
138
139 # Flattens params (on CUDA)
~/.local/lib/python3.6/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
574 return modules[name]
575 raise AttributeError("'{}' object has no attribute '{}'".format(
--> 576 type(self).__name__, name))
577
578 def __setattr__(self, name, value):
AttributeError: 'GRU' object has no attribute '_flat_weights_names'
|
AttributeError
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name]
# check token.text is empty or not
if token.text:
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name]
local_embedding = local_embedding.to(flair.device)
# check token.text is empty or not
if token.text:
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
|
https://github.com/flairNLP/flair/issues/1270
|
2019-11-12 09:10:16,480 Reading data from /home/username/.flair/datasets/conll_03_dutch
2019-11-12 09:10:16,480 Train: /home/username/.flair/datasets/conll_03_dutch/ned.train
2019-11-12 09:10:16,480 Dev: /home/username/.flair/datasets/conll_03_dutch/ned.testa
2019-11-12 09:10:16,480 Test: /home/username/.flair/datasets/conll_03_dutch/ned.testb
2019-11-12 09:10:28,986 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Model: "SequenceTagger(
(embeddings): StackedEmbeddings(
(list_embedding_0): WordEmbeddings('nl')
(list_embedding_1): PooledFlairEmbeddings(
(context_embeddings): FlairEmbeddings(
(lm): LanguageModel(
(drop): Dropout(p=0.1, inplace=False)
(encoder): Embedding(7632, 100)
(rnn): LSTM(100, 2048)
(decoder): Linear(in_features=2048, out_features=7632, bias=True)
)
)
)
(list_embedding_2): PooledFlairEmbeddings(
(context_embeddings): FlairEmbeddings(
(lm): LanguageModel(
(drop): Dropout(p=0.1, inplace=False)
(encoder): Embedding(7632, 100)
(rnn): LSTM(100, 2048)
(decoder): Linear(in_features=2048, out_features=7632, bias=True)
)
)
)
)
(word_dropout): WordDropout(p=0.05)
(locked_dropout): LockedDropout(p=0.5)
(embedding2nn): Linear(in_features=8492, out_features=8492, bias=True)
(rnn): LSTM(8492, 256, batch_first=True, bidirectional=True)
(linear): Linear(in_features=512, out_features=20, bias=True)
)"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Corpus: "Corpus: 15806 train + 2895 dev + 5195 test sentences"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Parameters:
2019-11-12 09:10:28,987 - learning_rate: "0.1"
2019-11-12 09:10:28,987 - mini_batch_size: "32"
2019-11-12 09:10:28,987 - patience: "3"
2019-11-12 09:10:28,987 - anneal_factor: "0.5"
2019-11-12 09:10:28,987 - max_epochs: "150"
2019-11-12 09:10:28,987 - shuffle: "True"
2019-11-12 09:10:28,987 - train_with_dev: "True"
2019-11-12 09:10:28,987 - batch_growth_annealing: "False"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Model training base path: "output/models/conll03-ner-test"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Device: cuda:0
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Embeddings storage mode: cpu
2019-11-12 09:10:28,988 ----------------------------------------------------------------------------------------------------
train mode resetting embeddings
train mode resetting embeddings
2019-11-12 09:10:29,408 epoch 1 - iter 0/585 - loss 49.54404449 - samples/sec: 4426.54
Traceback (most recent call last):
File "embeddings/flair_evaluate_conll03-dutch.py", line 44, in <module>
max_epochs=150)
File "/home/username/.conda/envs/flair-test/lib/python3.7/site-packages/flair/trainers/trainer.py", line 325, in train
loss.backward()
File "/home/username/.conda/envs/flair-test/lib/python3.7/site-packages/torch/tensor.py", line 166, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/home/username/.conda/envs/flair-test/lib/python3.7/site-packages/torch/autograd/__init__.py", line 99, in backward
allow_unreachable=True) # allow_unreachable flag
RuntimeError: CUDA out of memory. Tried to allocate 276.00 MiB (GPU 0; 10.73 GiB total capacity; 8.33 GiB already allocated; 121.56 MiB free; 1.43 GiB cached)
|
RuntimeError
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name].cpu()
# check token.text is empty or not
if token.text:
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name]
# check token.text is empty or not
if token.text:
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
|
https://github.com/flairNLP/flair/issues/1270
|
2019-11-12 09:10:16,480 Reading data from /home/username/.flair/datasets/conll_03_dutch
2019-11-12 09:10:16,480 Train: /home/username/.flair/datasets/conll_03_dutch/ned.train
2019-11-12 09:10:16,480 Dev: /home/username/.flair/datasets/conll_03_dutch/ned.testa
2019-11-12 09:10:16,480 Test: /home/username/.flair/datasets/conll_03_dutch/ned.testb
2019-11-12 09:10:28,986 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Model: "SequenceTagger(
(embeddings): StackedEmbeddings(
(list_embedding_0): WordEmbeddings('nl')
(list_embedding_1): PooledFlairEmbeddings(
(context_embeddings): FlairEmbeddings(
(lm): LanguageModel(
(drop): Dropout(p=0.1, inplace=False)
(encoder): Embedding(7632, 100)
(rnn): LSTM(100, 2048)
(decoder): Linear(in_features=2048, out_features=7632, bias=True)
)
)
)
(list_embedding_2): PooledFlairEmbeddings(
(context_embeddings): FlairEmbeddings(
(lm): LanguageModel(
(drop): Dropout(p=0.1, inplace=False)
(encoder): Embedding(7632, 100)
(rnn): LSTM(100, 2048)
(decoder): Linear(in_features=2048, out_features=7632, bias=True)
)
)
)
)
(word_dropout): WordDropout(p=0.05)
(locked_dropout): LockedDropout(p=0.5)
(embedding2nn): Linear(in_features=8492, out_features=8492, bias=True)
(rnn): LSTM(8492, 256, batch_first=True, bidirectional=True)
(linear): Linear(in_features=512, out_features=20, bias=True)
)"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Corpus: "Corpus: 15806 train + 2895 dev + 5195 test sentences"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Parameters:
2019-11-12 09:10:28,987 - learning_rate: "0.1"
2019-11-12 09:10:28,987 - mini_batch_size: "32"
2019-11-12 09:10:28,987 - patience: "3"
2019-11-12 09:10:28,987 - anneal_factor: "0.5"
2019-11-12 09:10:28,987 - max_epochs: "150"
2019-11-12 09:10:28,987 - shuffle: "True"
2019-11-12 09:10:28,987 - train_with_dev: "True"
2019-11-12 09:10:28,987 - batch_growth_annealing: "False"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Model training base path: "output/models/conll03-ner-test"
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Device: cuda:0
2019-11-12 09:10:28,987 ----------------------------------------------------------------------------------------------------
2019-11-12 09:10:28,987 Embeddings storage mode: cpu
2019-11-12 09:10:28,988 ----------------------------------------------------------------------------------------------------
train mode resetting embeddings
train mode resetting embeddings
2019-11-12 09:10:29,408 epoch 1 - iter 0/585 - loss 49.54404449 - samples/sec: 4426.54
Traceback (most recent call last):
File "embeddings/flair_evaluate_conll03-dutch.py", line 44, in <module>
max_epochs=150)
File "/home/username/.conda/envs/flair-test/lib/python3.7/site-packages/flair/trainers/trainer.py", line 325, in train
loss.backward()
File "/home/username/.conda/envs/flair-test/lib/python3.7/site-packages/torch/tensor.py", line 166, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/home/username/.conda/envs/flair-test/lib/python3.7/site-packages/torch/autograd/__init__.py", line 99, in backward
allow_unreachable=True) # allow_unreachable flag
RuntimeError: CUDA out of memory. Tried to allocate 276.00 MiB (GPU 0; 10.73 GiB total capacity; 8.33 GiB already allocated; 121.56 MiB free; 1.43 GiB cached)
|
RuntimeError
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
language_code = sentence.get_language_code()
supported = [
"en",
"de",
"bg",
"ca",
"hr",
"cs",
"da",
"nl",
"et",
"fi",
"fr",
"el",
"he",
"hu",
"id",
"it",
"mk",
"no",
"pl",
"pt",
"ro",
"ru",
"sk",
]
if language_code not in supported:
language_code = "en"
if language_code not in self.language_embeddings:
log.info(f"Loading up MUSE embeddings for '{language_code}'!")
# download if necessary
webpath = "https://alan-nlp.s3.eu-central-1.amazonaws.com/resources/embeddings-muse"
cache_dir = Path("embeddings") / "MUSE"
cached_path(
f"{webpath}/muse.{language_code}.vec.gensim.vectors.npy",
cache_dir=cache_dir,
)
embeddings_file = cached_path(
f"{webpath}/muse.{language_code}.vec.gensim", cache_dir=cache_dir
)
# load the model
self.language_embeddings[language_code] = gensim.models.KeyedVectors.load(
str(embeddings_file)
)
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_embedding = self.get_cached_vec(language_code=language_code, word=word)
token.set_embedding(self.name, word_embedding)
return sentences
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
language_code = sentence.get_language_code()
print(language_code)
supported = [
"en",
"de",
"bg",
"ca",
"hr",
"cs",
"da",
"nl",
"et",
"fi",
"fr",
"el",
"he",
"hu",
"id",
"it",
"mk",
"no",
"pl",
"pt",
"ro",
"ru",
"sk",
]
if language_code not in supported:
language_code = "en"
if language_code not in self.language_embeddings:
log.info(f"Loading up MUSE embeddings for '{language_code}'!")
# download if necessary
webpath = "https://alan-nlp.s3.eu-central-1.amazonaws.com/resources/embeddings-muse"
cache_dir = Path("embeddings") / "MUSE"
cached_path(
f"{webpath}/muse.{language_code}.vec.gensim.vectors.npy",
cache_dir=cache_dir,
)
embeddings_file = cached_path(
f"{webpath}/muse.{language_code}.vec.gensim", cache_dir=cache_dir
)
# load the model
self.language_embeddings[language_code] = gensim.models.KeyedVectors.load(
str(embeddings_file)
)
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
word_embedding = self.get_cached_vec(language_code=language_code, word=word)
token.set_embedding(self.name, word_embedding)
return sentences
|
https://github.com/flairNLP/flair/issues/1115
|
2019-09-17 11:47:41,099 read text file with 4 lines
2019-09-17 11:47:41,107 read text file with 4 lines
2019-09-17 11:47:41,205 read text file with 78280 lines
2019-09-17 11:47:41,265 shuffled
2019-09-17 11:48:08,470 Sequence length is 100
2019-09-17 11:48:08,502 Split 1 - (11:48:08)
2019-09-17 11:48:13,678 | split 1 / 1 | 100/ 359 batches | ms/batch 51.60 | loss 1.50 | ppl 4.48
2019-09-17 11:48:18,817 | split 1 / 1 | 200/ 359 batches | ms/batch 51.39 | loss 1.30 | ppl 3.67
2019-09-17 11:48:23,978 | split 1 / 1 | 300/ 359 batches | ms/batch 51.60 | loss 1.24 | ppl 3.44
2019-09-17 11:48:27,022 18 seconds for train split 1
Traceback (most recent call last):
File "main.py", line 192, in <module>
main()
File "main.py", line 188, in main
fine_tune()
File "main.py", line 180, in fine_tune
flair_stuff.fine_tune(corpus_dir, stored_dir, model_name=model_name)
File "/home/nolan/code/lang_model_builder/flair_stuff.py", line 314, in fine_tune
checkpoint=True)
File "/home/nolan/anaconda2/envs/py37/lib/python3.7/site-packages/flair/trainers/language_model_trainer.py", line 428, in train
val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
File "/home/nolan/anaconda2/envs/py37/lib/python3.7/site-packages/flair/trainers/language_model_trainer.py", line 511, in evaluate
return total_loss.item() / len(data_source)
AttributeError: 'int' object has no attribute 'item'
|
AttributeError
|
def generate_text(
self,
prefix: str = "\n",
number_of_characters: int = 1000,
temperature: float = 1.0,
break_on_suffix=None,
) -> Tuple[str, float]:
if prefix == "":
prefix = "\n"
with torch.no_grad():
characters = []
idx2item = self.dictionary.idx2item
# initial hidden state
hidden = self.init_hidden(1)
if len(prefix) > 1:
char_tensors = []
for character in prefix[:-1]:
char_tensors.append(
torch.tensor(self.dictionary.get_idx_for_item(character))
.unsqueeze(0)
.unsqueeze(0)
)
input = torch.cat(char_tensors).to(flair.device)
prediction, _, hidden = self.forward(input, hidden)
input = (
torch.tensor(self.dictionary.get_idx_for_item(prefix[-1]))
.unsqueeze(0)
.unsqueeze(0)
)
log_prob = 0.0
for i in range(number_of_characters):
input = input.to(flair.device)
# get predicted weights
prediction, _, hidden = self.forward(input, hidden)
prediction = prediction.squeeze().detach()
decoder_output = prediction
# divide by temperature
prediction = prediction.div(temperature)
# to prevent overflow problem with small temperature values, substract largest value from all
# this makes a vector in which the largest value is 0
max = torch.max(prediction)
prediction -= max
# compute word weights with exponential function
word_weights = prediction.exp().cpu()
# try sampling multinomial distribution for next character
try:
word_idx = torch.multinomial(word_weights, 1)[0]
except:
word_idx = torch.tensor(0)
# print(word_idx)
prob = decoder_output[word_idx]
log_prob += prob
input = word_idx.detach().unsqueeze(0).unsqueeze(0)
word = idx2item[word_idx].decode("UTF-8")
characters.append(word)
if break_on_suffix is not None:
if "".join(characters).endswith(break_on_suffix):
break
text = prefix + "".join(characters)
log_prob = log_prob.item()
log_prob /= len(characters)
if not self.is_forward_lm:
text = text[::-1]
return text, log_prob
|
def generate_text(
self,
prefix: str = "\n",
number_of_characters: int = 1000,
temperature: float = 1.0,
break_on_suffix=None,
) -> Tuple[str, float]:
if prefix == "":
prefix = "\n"
with torch.no_grad():
characters = []
idx2item = self.dictionary.idx2item
# initial hidden state
hidden = self.init_hidden(1)
if len(prefix) > 1:
char_tensors = []
for character in prefix[:-1]:
char_tensors.append(
torch.tensor(self.dictionary.get_idx_for_item(character))
.unsqueeze(0)
.unsqueeze(0)
)
input = torch.cat(char_tensors).to(flair.device)
prediction, _, hidden = self.forward(input, hidden)
input = (
torch.tensor(self.dictionary.get_idx_for_item(prefix[-1]))
.unsqueeze(0)
.unsqueeze(0)
)
log_prob = 0.0
for i in range(number_of_characters):
input = input.to(flair.device)
# get predicted weights
prediction, _, hidden = self.forward(input, hidden)
prediction = prediction.squeeze().detach()
decoder_output = prediction
# divide by temperature
prediction = prediction.div(temperature)
# to prevent overflow problem with small temperature values, substract largest value from all
# this makes a vector in which the largest value is 0
max = torch.max(prediction)
prediction -= max
# compute word weights with exponential function
word_weights = prediction.exp().cpu()
# try sampling multinomial distribution for next character
try:
word_idx = torch.multinomial(word_weights, 1)[0]
except:
word_idx = torch.tensor(0)
# print(word_idx)
prob = decoder_output[word_idx]
log_prob += prob
input = word_idx.detach().unsqueeze(0).unsqueeze(0)
word = idx2item[word_idx].decode("UTF-8")
characters.append(word)
if break_on_suffix is not None:
if "".join(characters).endswith(break_on_suffix):
break
text = prefix + "".join(characters)
log_prob = log_prob.item()
log_prob /= len(characters)
if not self.is_forward_lm:
text = text[::-1]
text = text.encode("utf-8")
return text, log_prob
|
https://github.com/flairNLP/flair/issues/1115
|
2019-09-17 11:47:41,099 read text file with 4 lines
2019-09-17 11:47:41,107 read text file with 4 lines
2019-09-17 11:47:41,205 read text file with 78280 lines
2019-09-17 11:47:41,265 shuffled
2019-09-17 11:48:08,470 Sequence length is 100
2019-09-17 11:48:08,502 Split 1 - (11:48:08)
2019-09-17 11:48:13,678 | split 1 / 1 | 100/ 359 batches | ms/batch 51.60 | loss 1.50 | ppl 4.48
2019-09-17 11:48:18,817 | split 1 / 1 | 200/ 359 batches | ms/batch 51.39 | loss 1.30 | ppl 3.67
2019-09-17 11:48:23,978 | split 1 / 1 | 300/ 359 batches | ms/batch 51.60 | loss 1.24 | ppl 3.44
2019-09-17 11:48:27,022 18 seconds for train split 1
Traceback (most recent call last):
File "main.py", line 192, in <module>
main()
File "main.py", line 188, in main
fine_tune()
File "main.py", line 180, in fine_tune
flair_stuff.fine_tune(corpus_dir, stored_dir, model_name=model_name)
File "/home/nolan/code/lang_model_builder/flair_stuff.py", line 314, in fine_tune
checkpoint=True)
File "/home/nolan/anaconda2/envs/py37/lib/python3.7/site-packages/flair/trainers/language_model_trainer.py", line 428, in train
val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
File "/home/nolan/anaconda2/envs/py37/lib/python3.7/site-packages/flair/trainers/language_model_trainer.py", line 511, in evaluate
return total_loss.item() / len(data_source)
AttributeError: 'int' object has no attribute 'item'
|
AttributeError
|
def train(
self,
base_path: Union[Path, str],
sequence_length: int,
learning_rate: float = 20,
mini_batch_size: int = 100,
anneal_factor: float = 0.25,
patience: int = 10,
clip=0.25,
max_epochs: int = 1000,
checkpoint: bool = False,
grow_to_sequence_length: int = 0,
num_workers: int = 2,
use_amp: bool = False,
amp_opt_level: str = "O1",
**kwargs,
):
if use_amp:
if sys.version_info < (3, 0):
raise RuntimeError("Apex currently only supports Python 3. Aborting.")
if amp is None:
raise RuntimeError(
"Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
"to enable mixed-precision training."
)
# cast string to Path
if type(base_path) is str:
base_path = Path(base_path)
add_file_handler(log, base_path / "training.log")
number_of_splits: int = len(self.corpus.train)
val_data = self._batchify(self.corpus.valid, mini_batch_size)
# error message if the validation dataset is too small
if val_data.size(0) == 1:
raise RuntimeError(
f"ERROR: Your validation dataset is too small. For your mini_batch_size, the data needs to "
f"consist of at least {mini_batch_size * 2} characters!"
)
base_path.mkdir(parents=True, exist_ok=True)
loss_txt = base_path / "loss.txt"
savefile = base_path / "best-lm.pt"
try:
epoch = self.epoch
best_val_loss = self.loss
optimizer = self.optimizer(self.model.parameters(), lr=learning_rate, **kwargs)
if self.optimizer_state is not None:
optimizer.load_state_dict(self.optimizer_state)
if isinstance(optimizer, (AdamW, SGDW)):
scheduler: ReduceLRWDOnPlateau = ReduceLRWDOnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience
)
else:
scheduler: ReduceLROnPlateau = ReduceLROnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience
)
if use_amp:
self.model, optimizer = amp.initialize(
self.model, optimizer, opt_level=amp_opt_level
)
training_generator = DataLoader(
self.corpus.train, shuffle=False, num_workers=num_workers
)
for epoch in range(self.epoch, max_epochs):
epoch_start_time = time.time()
# Shuffle training files randomly after serially iterating through corpus one
if epoch > 0:
training_generator = DataLoader(
self.corpus.train, shuffle=True, num_workers=num_workers
)
self.model.save_checkpoint(
base_path / f"epoch_{epoch}.pt",
optimizer,
epoch,
0,
best_val_loss,
)
# iterate through training data, starting at self.split (for checkpointing)
for curr_split, train_slice in enumerate(training_generator, self.split):
if sequence_length < grow_to_sequence_length:
sequence_length += 1
log.info(f"Sequence length is {sequence_length}")
split_start_time = time.time()
# off by one for printing
curr_split += 1
train_data = self._batchify(train_slice.flatten(), mini_batch_size)
log.info(
"Split %d" % curr_split
+ "\t - ({:%H:%M:%S})".format(datetime.datetime.now())
)
for group in optimizer.param_groups:
learning_rate = group["lr"]
# go into train mode
self.model.train()
# reset variables
hidden = self.model.init_hidden(mini_batch_size)
# not really sure what this does
ntokens = len(self.corpus.dictionary)
total_loss = 0
start_time = time.time()
for batch, i in enumerate(
range(0, train_data.size(0) - 1, sequence_length)
):
data, targets = self._get_batch(train_data, i, sequence_length)
if not data.is_cuda and cuda.is_available():
log.info(
"Batch %d is not on CUDA, training will be very slow"
% (batch)
)
raise Exception("data isnt on cuda")
self.model.zero_grad()
optimizer.zero_grad()
# do the forward pass in the model
output, rnn_output, hidden = self.model.forward(data, hidden)
# try to predict the targets
loss = self.loss_function(output.view(-1, ntokens), targets)
# Backward
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)
optimizer.step()
total_loss += loss.data
# We detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = self._repackage_hidden(hidden)
# explicitly remove loss to clear up memory
del loss, output, rnn_output
if batch % self.log_interval == 0 and batch > 0:
cur_loss = total_loss.item() / self.log_interval
elapsed = time.time() - start_time
log.info(
"| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | "
"loss {:5.2f} | ppl {:8.2f}".format(
curr_split,
number_of_splits,
batch,
len(train_data) // sequence_length,
elapsed * 1000 / self.log_interval,
cur_loss,
math.exp(cur_loss),
)
)
total_loss = 0
start_time = time.time()
log.info(
"%d seconds for train split %d"
% (time.time() - split_start_time, curr_split)
)
###############################################################################
self.model.eval()
val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
scheduler.step(val_loss)
log.info("best loss so far {:5.2f}".format(best_val_loss))
log.info(self.model.generate_text())
if checkpoint:
self.model.save_checkpoint(
base_path / "checkpoint.pt",
optimizer,
epoch,
curr_split,
best_val_loss,
)
# Save the model if the validation loss is the best we've seen so far.
if val_loss < best_val_loss:
self.model.best_score = best_val_loss
self.model.save(savefile)
best_val_loss = val_loss
###############################################################################
# print info
###############################################################################
log.info("-" * 89)
summary = (
"| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
"valid ppl {:8.2f} | learning rate {:3.4f}".format(
curr_split,
number_of_splits,
epoch + 1,
(time.time() - split_start_time),
val_loss,
math.exp(val_loss),
learning_rate,
)
)
with open(loss_txt, "a") as myfile:
myfile.write("%s\n" % summary)
log.info(summary)
log.info("-" * 89)
log.info("Epoch time: %.2f" % (time.time() - epoch_start_time))
except KeyboardInterrupt:
log.info("-" * 89)
log.info("Exiting from training early")
###############################################################################
# final testing
###############################################################################
test_data = self._batchify(self.corpus.test, mini_batch_size)
test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
summary = "TEST: valid loss {:5.2f} | valid ppl {:8.2f}".format(
test_loss, math.exp(test_loss)
)
with open(loss_txt, "a") as myfile:
myfile.write("%s\n" % summary)
log.info(summary)
log.info("-" * 89)
|
def train(
self,
base_path: Union[Path, str],
sequence_length: int,
learning_rate: float = 20,
mini_batch_size: int = 100,
anneal_factor: float = 0.25,
patience: int = 10,
clip=0.25,
max_epochs: int = 1000,
checkpoint: bool = False,
grow_to_sequence_length: int = 0,
num_workers: int = 2,
use_amp: bool = False,
amp_opt_level: str = "O1",
**kwargs,
):
if use_amp:
if sys.version_info < (3, 0):
raise RuntimeError("Apex currently only supports Python 3. Aborting.")
if amp is None:
raise RuntimeError(
"Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
"to enable mixed-precision training."
)
# cast string to Path
if type(base_path) is str:
base_path = Path(base_path)
add_file_handler(log, base_path / "training.log")
number_of_splits: int = len(self.corpus.train)
val_data = self._batchify(self.corpus.valid, mini_batch_size)
base_path.mkdir(parents=True, exist_ok=True)
loss_txt = base_path / "loss.txt"
savefile = base_path / "best-lm.pt"
try:
epoch = self.epoch
best_val_loss = self.loss
optimizer = self.optimizer(self.model.parameters(), lr=learning_rate, **kwargs)
if self.optimizer_state is not None:
optimizer.load_state_dict(self.optimizer_state)
if isinstance(optimizer, (AdamW, SGDW)):
scheduler: ReduceLRWDOnPlateau = ReduceLRWDOnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience
)
else:
scheduler: ReduceLROnPlateau = ReduceLROnPlateau(
optimizer, verbose=True, factor=anneal_factor, patience=patience
)
if use_amp:
self.model, optimizer = amp.initialize(
self.model, optimizer, opt_level=amp_opt_level
)
training_generator = DataLoader(
self.corpus.train, shuffle=False, num_workers=num_workers
)
for epoch in range(self.epoch, max_epochs):
epoch_start_time = time.time()
# Shuffle training files randomly after serially iterating through corpus one
if epoch > 0:
training_generator = DataLoader(
self.corpus.train, shuffle=True, num_workers=num_workers
)
self.model.save_checkpoint(
base_path / f"epoch_{epoch}.pt",
optimizer,
epoch,
0,
best_val_loss,
)
# iterate through training data, starting at self.split (for checkpointing)
for curr_split, train_slice in enumerate(training_generator, self.split):
if sequence_length < grow_to_sequence_length:
sequence_length += 1
log.info(f"Sequence length is {sequence_length}")
split_start_time = time.time()
# off by one for printing
curr_split += 1
train_data = self._batchify(train_slice.flatten(), mini_batch_size)
log.info(
"Split %d" % curr_split
+ "\t - ({:%H:%M:%S})".format(datetime.datetime.now())
)
for group in optimizer.param_groups:
learning_rate = group["lr"]
# go into train mode
self.model.train()
# reset variables
hidden = self.model.init_hidden(mini_batch_size)
# not really sure what this does
ntokens = len(self.corpus.dictionary)
total_loss = 0
start_time = time.time()
for batch, i in enumerate(
range(0, train_data.size(0) - 1, sequence_length)
):
data, targets = self._get_batch(train_data, i, sequence_length)
if not data.is_cuda and cuda.is_available():
log.info(
"Batch %d is not on CUDA, training will be very slow"
% (batch)
)
raise Exception("data isnt on cuda")
self.model.zero_grad()
optimizer.zero_grad()
# do the forward pass in the model
output, rnn_output, hidden = self.model.forward(data, hidden)
# try to predict the targets
loss = self.loss_function(output.view(-1, ntokens), targets)
# Backward
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)
optimizer.step()
total_loss += loss.data
# We detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = self._repackage_hidden(hidden)
# explicitly remove loss to clear up memory
del loss, output, rnn_output
if batch % self.log_interval == 0 and batch > 0:
cur_loss = total_loss.item() / self.log_interval
elapsed = time.time() - start_time
log.info(
"| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | "
"loss {:5.2f} | ppl {:8.2f}".format(
curr_split,
number_of_splits,
batch,
len(train_data) // sequence_length,
elapsed * 1000 / self.log_interval,
cur_loss,
math.exp(cur_loss),
)
)
total_loss = 0
start_time = time.time()
log.info(
"%d seconds for train split %d"
% (time.time() - split_start_time, curr_split)
)
###############################################################################
self.model.eval()
val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
scheduler.step(val_loss)
log.info("best loss so far {:5.2f}".format(best_val_loss))
log.info(self.model.generate_text())
if checkpoint:
self.model.save_checkpoint(
base_path / "checkpoint.pt",
optimizer,
epoch,
curr_split,
best_val_loss,
)
# Save the model if the validation loss is the best we've seen so far.
if val_loss < best_val_loss:
self.model.best_score = best_val_loss
self.model.save(savefile)
best_val_loss = val_loss
###############################################################################
# print info
###############################################################################
log.info("-" * 89)
summary = (
"| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
"valid ppl {:8.2f} | learning rate {:3.4f}".format(
curr_split,
number_of_splits,
epoch + 1,
(time.time() - split_start_time),
val_loss,
math.exp(val_loss),
learning_rate,
)
)
with open(loss_txt, "a") as myfile:
myfile.write("%s\n" % summary)
log.info(summary)
log.info("-" * 89)
log.info("Epoch time: %.2f" % (time.time() - epoch_start_time))
except KeyboardInterrupt:
log.info("-" * 89)
log.info("Exiting from training early")
###############################################################################
# final testing
###############################################################################
test_data = self._batchify(self.corpus.test, mini_batch_size)
test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)
summary = "TEST: valid loss {:5.2f} | valid ppl {:8.2f}".format(
test_loss, math.exp(test_loss)
)
with open(loss_txt, "a") as myfile:
myfile.write("%s\n" % summary)
log.info(summary)
log.info("-" * 89)
|
https://github.com/flairNLP/flair/issues/1115
|
2019-09-17 11:47:41,099 read text file with 4 lines
2019-09-17 11:47:41,107 read text file with 4 lines
2019-09-17 11:47:41,205 read text file with 78280 lines
2019-09-17 11:47:41,265 shuffled
2019-09-17 11:48:08,470 Sequence length is 100
2019-09-17 11:48:08,502 Split 1 - (11:48:08)
2019-09-17 11:48:13,678 | split 1 / 1 | 100/ 359 batches | ms/batch 51.60 | loss 1.50 | ppl 4.48
2019-09-17 11:48:18,817 | split 1 / 1 | 200/ 359 batches | ms/batch 51.39 | loss 1.30 | ppl 3.67
2019-09-17 11:48:23,978 | split 1 / 1 | 300/ 359 batches | ms/batch 51.60 | loss 1.24 | ppl 3.44
2019-09-17 11:48:27,022 18 seconds for train split 1
Traceback (most recent call last):
File "main.py", line 192, in <module>
main()
File "main.py", line 188, in main
fine_tune()
File "main.py", line 180, in fine_tune
flair_stuff.fine_tune(corpus_dir, stored_dir, model_name=model_name)
File "/home/nolan/code/lang_model_builder/flair_stuff.py", line 314, in fine_tune
checkpoint=True)
File "/home/nolan/anaconda2/envs/py37/lib/python3.7/site-packages/flair/trainers/language_model_trainer.py", line 428, in train
val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
File "/home/nolan/anaconda2/envs/py37/lib/python3.7/site-packages/flair/trainers/language_model_trainer.py", line 511, in evaluate
return total_loss.item() / len(data_source)
AttributeError: 'int' object has no attribute 'item'
|
AttributeError
|
def add_file_handler(log, output_file):
init_output_file(output_file.parents[0], output_file.name)
fh = logging.FileHandler(output_file, mode="w", encoding="utf-8")
fh.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-15s %(message)s")
fh.setFormatter(formatter)
log.addHandler(fh)
return fh
|
def add_file_handler(log, output_file):
init_output_file(output_file.parents[0], output_file.name)
fh = logging.FileHandler(output_file)
fh.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-15s %(message)s")
fh.setFormatter(formatter)
log.addHandler(fh)
return fh
|
https://github.com/flairNLP/flair/issues/718
|
--- Logging error ---
Traceback (most recent call last):
File "...\torch\lib\logging\__init__.py", line 1037, in emit
stream.write(msg + self.terminator)
File "...\torch\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode characters in position 28-31: character maps to <undefined>
Call stack:
File "...flair_lm.py", line 33, in <module>
File "...\torch\lib\site-packages\flair\trainers\language_model_trainer.py", line 336, in train
log.info(self.model.generate_text())
Message: (...)
Arguments: ()
|
UnicodeEncodeError
|
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU' or 'LSTM'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
# bidirectional RNN on top of embedding layer
if rnn_type == "LSTM":
self.rnn = torch.nn.LSTM(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
else:
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
|
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU', 'LSTM', 'RNN_TANH' or 'RNN_RELU'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
# bidirectional RNN on top of embedding layer
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
self.rnn = torch.nn.RNNBase(
rnn_type,
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
|
https://github.com/flairNLP/flair/issues/780
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-19-e27894ad7da5> in <module>
1 assert torch.cuda.is_available(), "Do not run this model with GPU support."
2 # Run model
----> 3 model(config)
~/cloud/model.py in model(config, language, optimise)
77 anneal_factor=config.anneal_factor,
78 patience=config.patience,
---> 79 max_epochs=config.max_epochs)
80
81
~/anaconda3/envs/py36/lib/python3.6/site-packages/flair/trainers/trainer.py in train(self, base_path, evaluation_metric, learning_rate, mini_batch_size, eval_mini_batch_size, max_epochs, anneal_factor, patience, train_with_dev, monitor_train, embeddings_in_memory, checkpoint, save_final_model, anneal_with_restarts, shuffle, param_selection_mode, num_workers, **kwargs)
91 [Sentence("d", labels=["0.1"])],
92 eval_mini_batch_size,
---> 93 embeddings_in_memory,
94 )
95 if log_train:
~/anaconda3/envs/py36/lib/python3.6/site-packages/flair/models/text_classification_model.py in evaluate(self, sentences, eval_mini_batch_size, embeddings_in_memory, out_path)
175 batch_count += 1
176
--> 177 labels, loss = self.forward_labels_and_loss(batch)
178
179 clear_embeddings(
~/anaconda3/envs/py36/lib/python3.6/site-packages/flair/models/text_classification_model.py in forward_labels_and_loss(self, sentences)
106 self, sentences: Union[Sentence, List[Sentence]]
107 ) -> (List[List[Label]], torch.tensor):
--> 108 scores = self.forward(sentences)
109 labels = self._obtain_labels(scores)
110 loss = self._calculate_loss(scores, sentences)
~/anaconda3/envs/py36/lib/python3.6/site-packages/flair/models/text_classification_model.py in forward(self, sentences)
68
69 def forward(self, sentences) -> List[List[float]]:
---> 70 self.document_embeddings.embed(sentences)
71
72 text_embedding_list = [
~/anaconda3/envs/py36/lib/python3.6/site-packages/flair/embeddings.py in embed(self, sentences)
2079 self.rnn.flatten_parameters()
2080
-> 2081 rnn_out, hidden = self.rnn(packed)
2082
2083 outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)
~/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
~/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
206
207 self.check_forward_args(input, hx, batch_sizes)
--> 208 _impl = _rnn_impls[self.mode]
209 if batch_sizes is None:
210 result = _impl(input, hx, self._get_flat_weights(), self.bias, self.num_layers,
KeyError: 'LSTM'
|
KeyError
|
def __init__(
self,
text: str = None,
use_tokenizer: bool = False,
labels: Union[List[Label], List[str]] = None,
):
super(Sentence, self).__init__()
self.tokens: List[Token] = []
self.labels: List[Label] = []
if labels is not None:
self.add_labels(labels)
self._embeddings: Dict = {}
# if text is passed, instantiate sentence with tokens (words)
if text is not None:
# tokenize the text first if option selected
if use_tokenizer:
# use segtok for tokenization
tokens = []
sentences = split_single(text)
for sentence in sentences:
contractions = split_contractions(word_tokenizer(sentence))
tokens.extend(contractions)
# determine offsets for whitespace_after field
index = text.index
running_offset = 0
last_word_offset = -1
last_token = None
for word in tokens:
try:
word_offset = index(word, running_offset)
start_position = word_offset
except:
word_offset = last_word_offset + 1
start_position = (
running_offset + 1 if running_offset > 0 else running_offset
)
token = Token(word, start_position=start_position)
self.add_token(token)
if word_offset - 1 == last_word_offset and last_token is not None:
last_token.whitespace_after = False
word_len = len(word)
running_offset = word_offset + word_len
last_word_offset = running_offset - 1
last_token = token
# otherwise assumes whitespace tokenized text
else:
# catch the empty string case
if not text:
raise ValueError("Cannot convert empty string to a Sentence object.")
# add each word in tokenized string as Token object to Sentence
word = ""
for index, char in enumerate(text):
if char == " ":
if len(word) > 0:
token = Token(word, start_position=index - len(word))
self.add_token(token)
word = ""
else:
word += char
# increment for last token in sentence if not followed by whtespace
index += 1
if len(word) > 0:
token = Token(word, start_position=index - len(word))
self.add_token(token)
|
def __init__(
self,
text: str = None,
use_tokenizer: bool = False,
labels: Union[List[Label], List[str]] = None,
):
super(Sentence, self).__init__()
self.tokens: List[Token] = []
self.labels: List[Label] = []
if labels is not None:
self.add_labels(labels)
self._embeddings: Dict = {}
# if text is passed, instantiate sentence with tokens (words)
if text is not None:
# tokenize the text first if option selected
if use_tokenizer:
# use segtok for tokenization
tokens = []
sentences = split_single(text)
for sentence in sentences:
contractions = split_contractions(word_tokenizer(sentence))
tokens.extend(contractions)
# determine offsets for whitespace_after field
index = text.index
running_offset = 0
last_word_offset = -1
last_token = None
for word in tokens:
try:
word_offset = index(word, running_offset)
start_position = word_offset
except:
word_offset = last_word_offset + 1
start_position = (
running_offset + 1 if running_offset > 0 else running_offset
)
token = Token(word, start_position=start_position)
self.add_token(token)
if word_offset - 1 == last_word_offset and last_token is not None:
last_token.whitespace_after = False
word_len = len(word)
running_offset = word_offset + word_len
last_word_offset = running_offset - 1
last_token = token
# otherwise assumes whitespace tokenized text
else:
# add each word in tokenized string as Token object to Sentence
word = ""
for index, char in enumerate(text):
if char == " ":
if len(word) > 0:
token = Token(word, start_position=index - len(word))
self.add_token(token)
word = ""
else:
word += char
# increment for last token in sentence if not followed by whtespace
index += 1
if len(word) > 0:
token = Token(word, start_position=index - len(word))
self.add_token(token)
|
https://github.com/flairNLP/flair/issues/565
|
from flair.data import Sentence
s = ''
Sentence(s)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\XYZ\AppData\Local\Programs\Python\Python36\lib\site-packages\flair\data.py", line 338, in __init__
index += 1
UnboundLocalError: local variable 'index' referenced before assignment
|
UnboundLocalError
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added,
updates only if embeddings are non-static."""
# first, find longest sentence in batch
longest_sentence_in_batch: int = len(
max(
[
self.tokenizer.tokenize(sentence.to_tokenized_string())
for sentence in sentences
],
key=len,
)
)
# prepare id maps for BERT model
features = self._convert_sentences_to_features(sentences, longest_sentence_in_batch)
if torch.cuda.is_available():
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long
).cuda()
all_input_masks = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
).cuda()
else:
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_masks = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
)
# put encoded batch through BERT model to get all hidden states of all encoder layers
if torch.cuda.is_available():
self.model.cuda()
self.model.eval()
all_encoder_layers, _ = self.model(
all_input_ids, token_type_ids=None, attention_mask=all_input_masks
)
with torch.no_grad():
for sentence_index, sentence in enumerate(sentences):
feature = features[sentence_index]
# get aggregated embeddings for each BERT-subtoken in sentence
subtoken_embeddings = []
for token_index, _ in enumerate(feature.tokens):
all_layers = []
for layer_index in self.layer_indexes:
layer_output = (
all_encoder_layers[int(layer_index)]
.detach()
.cpu()[sentence_index]
)
all_layers.append(layer_output[token_index])
subtoken_embeddings.append(torch.cat(all_layers))
# get the current sentence object
token_idx = 0
for token in sentence:
# add concatenated embedding to sentence
token_idx += 1
if self.pooling_operation == "first":
# use first subword embedding if pooling operation is 'first'
token.set_embedding(self.name, subtoken_embeddings[token_idx])
else:
# otherwise, do a mean over all subwords in token
embeddings = subtoken_embeddings[
token_idx : token_idx + feature.token_subtoken_count[token.idx]
]
embeddings = [embedding.unsqueeze(0) for embedding in embeddings]
mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
token_idx += feature.token_subtoken_count[token.idx] - 1
return sentences
|
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added,
updates only if embeddings are non-static."""
# first, find longest sentence in batch
longest_sentence_in_batch: int = len(
max(
[
self.tokenizer.tokenize(sentence.to_tokenized_string())
for sentence in sentences
],
key=len,
)
)
# prepare id maps for BERT model
features = self._convert_sentences_to_features(sentences, longest_sentence_in_batch)
if torch.cuda.is_available():
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long
).cuda()
all_input_masks = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
).cuda()
else:
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_masks = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
)
# put encoded batch through BERT model to get all hidden states of all encoder layers
self.model.eval()
all_encoder_layers, _ = self.model(
all_input_ids, token_type_ids=None, attention_mask=all_input_masks
)
with torch.no_grad():
for sentence_index, sentence in enumerate(sentences):
feature = features[sentence_index]
# get aggregated embeddings for each BERT-subtoken in sentence
subtoken_embeddings = []
for token_index, _ in enumerate(feature.tokens):
all_layers = []
for layer_index in self.layer_indexes:
layer_output = (
all_encoder_layers[int(layer_index)]
.detach()
.cpu()[sentence_index]
)
all_layers.append(layer_output[token_index])
subtoken_embeddings.append(torch.cat(all_layers))
# get the current sentence object
token_idx = 0
for token in sentence:
# add concatenated embedding to sentence
token_idx += 1
if self.pooling_operation == "first":
# use first subword embedding if pooling operation is 'first'
token.set_embedding(self.name, subtoken_embeddings[token_idx])
else:
# otherwise, do a mean over all subwords in token
embeddings = subtoken_embeddings[
token_idx : token_idx + feature.token_subtoken_count[token.idx]
]
embeddings = [embedding.unsqueeze(0) for embedding in embeddings]
mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
token_idx += feature.token_subtoken_count[token.idx] - 1
return sentences
|
https://github.com/flairNLP/flair/issues/325
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-1-47fbfcb3fd4f> in <module>
5 BertEmbedding = BertEmbeddings()
6 sent = Sentence('I love Austin')
----> 7 BertEmbedding.embed(sent)
8 # fe.embed(sent)
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/flair/embeddings.py in embed(self, sentences)
54
55 if not everything_embedded or not self.static_embeddings:
---> 56 self._add_embeddings_internal(sentences)
57
58 return sentences
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/flair/embeddings.py in _add_embeddings_internal(self, sentences)
957 # put encoded batch through BERT model to get all hidden states of all encoder layers
958 self.model.eval()
--> 959 all_encoder_layers, _ = self.model(all_input_ids, token_type_ids=None, attention_mask=all_input_masks)
960
961 with torch.no_grad():
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/pytorch_pretrained_bert/modeling.py in forward(self, input_ids, token_type_ids, attention_mask, output_all_encoded_layers)
607 extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
608
--> 609 embedding_output = self.embeddings(input_ids, token_type_ids)
610 encoded_layers = self.encoder(embedding_output,
611 extended_attention_mask,
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/pytorch_pretrained_bert/modeling.py in forward(self, input_ids, token_type_ids)
191 token_type_ids = torch.zeros_like(input_ids)
192
--> 193 words_embeddings = self.word_embeddings(input_ids)
194 position_embeddings = self.position_embeddings(position_ids)
195 token_type_embeddings = self.token_type_embeddings(token_type_ids)
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/torch/nn/modules/sparse.py in forward(self, input)
116 return F.embedding(
117 input, self.weight, self.padding_idx, self.max_norm,
--> 118 self.norm_type, self.scale_grad_by_freq, self.sparse)
119
120 def extra_repr(self):
~/anaconda3/envs/gpu-dev/lib/python3.6/site-packages/torch/nn/functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1452 # remove once script supports set_grad_enabled
1453 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1454 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1455
1456
RuntimeError: Expected object of backend CPU but got backend CUDA for argument #3 'index'
|
RuntimeError
|
def predict(
self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32
) -> List[Sentence]:
if type(sentences) is Sentence:
sentences = [sentences]
# remove previous embeddings
clear_embeddings(sentences)
# make mini-batches
batches = [
sentences[x : x + mini_batch_size]
for x in range(0, len(sentences), mini_batch_size)
]
for batch in batches:
score, tag_seq = self._predict_scores_batch(batch)
predicted_id = tag_seq
all_tokens = []
for sentence in batch:
all_tokens.extend(sentence.tokens)
for token, pred_id in zip(all_tokens, predicted_id):
token: Token = token
# get the predicted tag
predicted_tag = self.tag_dictionary.get_item_for_index(pred_id)
token.add_tag(self.tag_type, predicted_tag)
return sentences
|
def predict(
self, sentences: Union[List[Sentence], Sentence], mini_batch_size=32
) -> List[Sentence]:
if type(sentences) is Sentence:
sentences = [sentences]
# make mini-batches
batches = [
sentences[x : x + mini_batch_size]
for x in range(0, len(sentences), mini_batch_size)
]
for batch in batches:
score, tag_seq = self._predict_scores_batch(batch)
predicted_id = tag_seq
all_tokens = []
for sentence in batch:
all_tokens.extend(sentence.tokens)
for token, pred_id in zip(all_tokens, predicted_id):
token: Token = token
# get the predicted tag
predicted_tag = self.tag_dictionary.get_item_for_index(pred_id)
token.add_tag(self.tag_type, predicted_tag)
return sentences
|
https://github.com/flairNLP/flair/issues/23
|
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-4-4d86fef1ef40> in <module>()
----> 1 tagger = SequenceTagger.load('ner-ontonotes')
~/.virtualenvs/py36/lib/python3.6/site-packages/flair/tagging_model.py in load(model)
459
460 if model_file is not None:
--> 461 tagger: SequenceTagger = SequenceTagger.load_from_file(model_file)
462 return tagger
463
~/.virtualenvs/py36/lib/python3.6/site-packages/flair/tagging_model.py in load_from_file(cls, model_file)
123 # serialization of torch objects
124 warnings.filterwarnings("ignore")
--> 125 state = torch.load(model_file, map_location={'cuda:0': 'cpu'})
126 warnings.filterwarnings("default")
127
~/.virtualenvs/py36/lib/python3.6/site-packages/torch/serialization.py in load(f, map_location, pickle_module)
301 f = open(f, 'rb')
302 try:
--> 303 return _load(f, map_location, pickle_module)
304 finally:
305 if new_fd:
~/.virtualenvs/py36/lib/python3.6/site-packages/torch/serialization.py in _load(f, map_location, pickle_module)
467 unpickler = pickle_module.Unpickler(f)
468 unpickler.persistent_load = persistent_load
--> 469 result = unpickler.load()
470
471 deserialized_storage_keys = pickle_module.load(f)
OSError: [Errno 22] Invalid argument
|
OSError
|
def __init__(
self, text: str = None, use_tokenizer: bool = False, labels: List[Label] = None
):
super(Sentence, self).__init__()
self.tokens: List[Token] = []
self.labels: List[Label] = labels
self._embeddings: Dict = {}
# if text is passed, instantiate sentence with tokens (words)
if text is not None:
# tokenize the text first if option selected
if use_tokenizer:
# use segtok for tokenization
tokens = []
sentences = split_single(text)
for sentence in sentences:
contractions = split_contractions(word_tokenizer(sentence))
tokens.extend(contractions)
# determine offsets for whitespace_after field
index = text.index
running_offset = 0
last_word_offset = -1
last_token = None
for word in tokens:
token = Token(word)
self.add_token(token)
try:
word_offset = index(word, running_offset)
except:
word_offset = last_word_offset + 1
if word_offset - 1 == last_word_offset and last_token is not None:
last_token.whitespace_after = False
word_len = len(word)
running_offset = word_offset + word_len
last_word_offset = running_offset - 1
last_token = token
# otherwise assumes whitespace tokenized text
else:
# add each word in tokenized string as Token object to Sentence
for word in text.split(" "):
if word:
token = Token(word)
self.add_token(token)
|
def __init__(
self, text: str = None, use_tokenizer: bool = False, labels: List[Label] = None
):
super(Sentence, self).__init__()
self.tokens: List[Token] = []
self.labels: List[Label] = labels
self._embeddings: Dict = {}
# if text is passed, instantiate sentence with tokens (words)
if text is not None:
# tokenize the text first if option selected
if use_tokenizer:
# use segtok for tokenization
tokens = []
sentences = split_single(text)
for sentence in sentences:
contractions = split_contractions(word_tokenizer(sentence))
tokens.extend(contractions)
# determine offsets for whitespace_after field
index = text.index
running_offset = 0
last_word_offset = -1
last_token = None
for word in tokens:
token = Token(word)
self.add_token(token)
try:
word_offset = index(word, running_offset)
except:
word_offset = last_word_offset + 1
if word_offset - 1 == last_word_offset and last_token is not None:
last_token.whitespace_after = False
word_len = len(word)
running_offset = word_offset + word_len
last_word_offset = running_offset - 1
last_token = token
# otherwise assumes whitespace tokenized text
else:
# add each word in tokenized string as Token object to Sentence
for word in text.split(" "):
token = Token(word)
self.add_token(token)
|
https://github.com/flairNLP/flair/issues/83
|
Traceback (most recent call last):
File "predict.py", line 8, in <module>
tagger.predict(sentence)
File "/home/projects/ner/flair/flair/models/sequence_tagger_model.py", line 371, in predict
score, tag_seq = self._predict_scores_batch(batch)
File "/home/projects/ner/flair/flair/models/sequence_tagger_model.py", line 386, in _predict_scores_batch
all_feats, tags = self.forward(sentences)
File "/home/projects/ner/flair/flair/models/sequence_tagger_model.py", line 157, in forward
self.embeddings.embed(sentences)
File "/home/projects/ner/flair/flair/embeddings.py", line 115, in embed
embedding.embed(sentences)
File "/home/projects/ner/flair/flair/embeddings.py", line 48, in embed
self._add_embeddings_internal(sentences)
File "/home/projects/ner/flair/flair/embeddings.py", line 268, in _add_embeddings_internal
tokens_sorted_by_length = sorted(tokens_char_indices, key=lambda p: len(p), reverse=True)
File "/home/miniconda3/lib/python3.6/site-packages/torch/onnx/__init__.py", line 68, in wrapper
return fn(*args, **kwargs)
File "/home/miniconda3/lib/python3.6/site-packages/torch/nn/utils/rnn.py", line 141, in pack_padded_sequence
data, batch_sizes = PackPadded.apply(input, lengths, batch_first)
File "/home/miniconda3/lib/python3.6/site-packages/torch/nn/_functions/packing.py", line 12, in forward
raise ValueError("Length of all samples has to be greater than 0, "
ValueError: Length of all samples has to be greater than 0, but found an element in 'lengths' that is <= 0
|
ValueError
|
def initialize(self, argv=None):
"""Initialize application, notebooks, writer, and postprocessor"""
# See https://bugs.python.org/issue37373 :(
if (
sys.version_info[0] == 3
and sys.version_info[1] >= 8
and sys.platform.startswith("win")
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
self.init_syspath()
super().initialize(argv)
self.init_notebooks()
self.init_writer()
self.init_postprocessor()
|
def initialize(self, argv=None):
"""Initialize application, notebooks, writer, and postprocessor"""
self.init_syspath()
super().initialize(argv)
self.init_notebooks()
self.init_writer()
self.init_postprocessor()
|
https://github.com/jupyter/nbconvert/issues/1372
|
[NbConvertApp] Converting notebook C:\projects\matplotlib\lib\matplotlib\tests\test_nbagg_01.ipynb to notebook
Traceback (most recent call last):
File "c:\miniconda37-x64\envs\test-environment\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\miniconda37-x64\envs\test-environment\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Miniconda37-x64\envs\test-environment\Scripts\jupyter-nbconvert.EXE\__main__.py", line 7, in <module>
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_core\application.py", line 270, in launch_instance
return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\traitlets\config\application.py", line 837, in launch_instance
app.start()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\nbconvertapp.py", line 345, in start
self.convert_notebooks()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\nbconvertapp.py", line 519, in convert_notebooks
self.convert_single_notebook(notebook_filename)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\nbconvertapp.py", line 484, in convert_single_notebook
output, resources = self.export_single_notebook(notebook_filename, resources, input_buffer=input_buffer)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\nbconvertapp.py", line 413, in export_single_notebook
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\exporters\exporter.py", line 182, in from_filename
return self.from_file(f, resources=resources, **kw)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\exporters\exporter.py", line 200, in from_file
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\exporters\notebook.py", line 32, in from_notebook_node
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\exporters\exporter.py", line 144, in from_notebook_node
nb_copy, resources = self._preprocess(nb_copy, resources)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\exporters\exporter.py", line 319, in _preprocess
nbc, resc = preprocessor(nbc, resc)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\preprocessors\base.py", line 47, in __call__
return self.preprocess(nb, resources)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbconvert\preprocessors\execute.py", line 65, in preprocess
self.execute()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbclient\util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbclient\util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "c:\miniconda37-x64\envs\test-environment\lib\asyncio\base_events.py", line 616, in run_until_complete
return future.result()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbclient\client.py", line 519, in async_execute
async with self.async_setup_kernel(**kwargs):
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\async_generator\_util.py", line 34, in __aenter__
return await self._agen.asend(None)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbclient\client.py", line 478, in async_setup_kernel
await self.async_start_new_kernel_client()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbclient\client.py", line 403, in async_start_new_kernel_client
await ensure_async(self.kc.start_channels())
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\client.py", line 106, in start_channels
self.shell_channel.start()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\client.py", line 151, in shell_channel
socket = self.connect_shell(identity=self.session.bsession)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\connect.py", line 561, in connect_shell
return self._create_connected_socket('shell', identity=identity)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\connect.py", line 545, in _create_connected_socket
sock = self.context.socket(socket_type)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\sugar\context.py", line 226, in socket
s = self._socket_class(self, socket_type, **kwargs)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\_future.py", line 144, in __init__
self._init_io_state()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\asyncio\__init__.py", line 53, in _init_io_state
self.io_loop.add_reader(self._fd, lambda : self._handle_events(0, 0))
File "c:\miniconda37-x64\envs\test-environment\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\nbclient\client.py", line 360, in _async_cleanup_kernel
await ensure_async(self.kc.stop_channels())
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\client.py", line 125, in stop_channels
if self.shell_channel.is_alive():
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\client.py", line 151, in shell_channel
socket = self.connect_shell(identity=self.session.bsession)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\connect.py", line 561, in connect_shell
return self._create_connected_socket('shell', identity=identity)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\jupyter_client\connect.py", line 545, in _create_connected_socket
sock = self.context.socket(socket_type)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\sugar\context.py", line 226, in socket
s = self._socket_class(self, socket_type, **kwargs)
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\_future.py", line 144, in __init__
self._init_io_state()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\asyncio\__init__.py", line 53, in _init_io_state
self.io_loop.add_reader(self._fd, lambda : self._handle_events(0, 0))
File "c:\miniconda37-x64\envs\test-environment\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
Exception ignored in: <function Socket.__del__ at 0x0000004DEB3CF670>
Traceback (most recent call last):
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\sugar\socket.py", line 67, in __del__
self.close()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\_future.py", line 160, in close
self._clear_io_state()
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\asyncio\__init__.py", line 60, in _clear_io_state
self.io_loop.remove_reader(self._fd)
File "c:\miniconda37-x64\envs\test-environment\lib\asyncio\events.py", line 504, in remove_reader
raise NotImplementedError
NotImplementedError:
Exception ignored in: <function Socket.__del__ at 0x0000004DEB3CF670>
Traceback (most recent call last):
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\sugar\socket.py", line 67, in __del__
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\_future.py", line 160, in close
File "c:\miniconda37-x64\envs\test-environment\lib\site-packages\zmq\asyncio\__init__.py", line 60, in _clear_io_state
File "c:\miniconda37-x64\envs\test-environment\lib\asyncio\events.py", line 504, in remove_reader
NotImplementedError:
|
NotImplementedError
|
def get_export_names(config=get_config()):
"""Return a list of the currently supported export targets
Exporters can be found in external packages by registering
them as an nbconvert.exporter entrypoint.
"""
exporters = sorted(entrypoints.get_group_named("nbconvert.exporters"))
enabled_exporters = []
for exporter_name in exporters:
try:
e = get_exporter(exporter_name)(config=config)
if e.enabled:
enabled_exporters.append(exporter_name)
except (ExporterDisabledError, ValueError):
pass
return enabled_exporters
|
def get_export_names(config=get_config()):
"""Return a list of the currently supported export targets
Exporters can be found in external packages by registering
them as an nbconvert.exporter entrypoint.
"""
exporters = sorted(entrypoints.get_group_named("nbconvert.exporters"))
enabled_exporters = []
for exporter_name in exporters:
try:
e = get_exporter(exporter_name)(config=config)
if e.enabled:
enabled_exporters.append(exporter_name)
except ExporterDisabledError:
pass
return enabled_exporters
|
https://github.com/jupyter/nbconvert/issues/1366
|
Traceback (most recent call last):
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/tornado/web.py", line 1701, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/tornado/web.py", line 3178, in wrapper
return method(self, *args, **kwargs)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/jupyter_server/services/nbconvert/handlers.py", line 17, in get
exporters = base.get_export_names()
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/base.py", line 141, in get_export_names
e = get_exporter(exporter_name)(config=config)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/base.py", line 102, in get_exporter
if getattr(exporter(config=config), 'enabled', True):
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/templateexporter.py", line 323, in __init__
super().__init__(config=config, **kw)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/exporter.py", line 115, in __init__
self._init_preprocessors()
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/templateexporter.py", line 489, in _init_preprocessors
conf = self._get_conf()
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/templateexporter.py", line 505, in _get_conf
for path in map(Path, self.template_paths):
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/traitlets/traitlets.py", line 573, in __get__
return self.get(obj, cls)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/traitlets/traitlets.py", line 536, in get
default = obj.trait_defaults(self.name)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/traitlets/traitlets.py", line 1575, in trait_defaults
return self._get_trait_default_generator(names[0])(self)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/traitlets/traitlets.py", line 973, in __call__
return self.func(*args, **kwargs)
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/templateexporter.py", line 516, in _template_paths
template_names = self.get_template_names()
File "/Users/stslve/miniconda/envs/jlab-master/lib/python3.7/site-packages/nbconvert/exporters/templateexporter.py", line 577, in get_template_names
raise ValueError('No template sub-directory with name %r found in the following paths:\n\t%s' % (base_template, paths))
ValueError: No template sub-directory with name 'asciidoc' found in the following paths:
/tmp/tmpcp62hwjq/jupyter_data
/tmp/tmpcp62hwjq/env/share/jupyter
/tmp/tmpcp62hwjq/share/jupyter
|
ValueError
|
def transform(data, func):
if compat.USE_PYGEOS:
coords = pygeos.get_coordinates(data)
new_coords = func(coords[:, 0], coords[:, 1])
result = pygeos.set_coordinates(data.copy(), np.array(new_coords).T)
return result
else:
from shapely.ops import transform
n = len(data)
result = np.empty(n, dtype=object)
for i in range(n):
geom = data[i]
if _isna(geom):
result[i] = geom
else:
result[i] = transform(func, geom)
return result
|
def transform(data, func):
if compat.USE_PYGEOS:
coords = pygeos.get_coordinates(data)
new_coords = func(coords[:, 0], coords[:, 1])
result = pygeos.set_coordinates(data.copy(), np.array(new_coords).T)
return result
else:
from shapely.ops import transform
n = len(data)
result = np.empty(n, dtype=object)
for i in range(n):
geom = data[i]
result[i] = transform(func, geom)
return result
|
https://github.com/geopandas/geopandas/issues/1573
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
in
15
16 gdf.loc[3,'geometry'] = None
---> 17 gdf.to_crs(epsg=5347)
/opt/miniconda3/envs/stable/lib/python3.8/site-packages/geopandas/geodataframe.py in to_crs(self, crs, epsg, inplace)
814 else:
815 df = self.copy()
--> 816 geom = df.geometry.to_crs(crs=crs, epsg=epsg)
817 df.geometry = geom
818 df.crs = geom.crs
/opt/miniconda3/envs/stable/lib/python3.8/site-packages/geopandas/geoseries.py in to_crs(self, crs, epsg)
541 transformer = Transformer.from_crs(self.crs, crs, always_xy=True)
542
--> 543 new_data = vectorized.transform(self.values.data, transformer.transform)
544 return GeoSeries(
545 GeometryArray(new_data), crs=crs, index=self.index, name=self.name
/opt/miniconda3/envs/stable/lib/python3.8/site-packages/geopandas/_vectorized.py in transform(data, func)
887 for i in range(n):
888 geom = data[i]
--> 889 result[i] = transform(func, geom)
890
891 return result
/opt/miniconda3/envs/stable/lib/python3.8/site-packages/shapely/ops.py in transform(func, geom)
222 also satisfy the requirements for `func`.
223 """
--> 224 if geom.is_empty:
225 return geom
226 if geom.type in ('Point', 'LineString', 'LinearRing', 'Polygon'):
AttributeError: 'NoneType' object has no attribute 'is_empty'
|
AttributeError
|
def _mapclassify_choro(values, scheme, **classification_kwds):
"""
Wrapper for choropleth schemes from mapclassify for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme : str
One of mapclassify classification schemes
Options are BoxPlot, EqualInterval, FisherJenks,
FisherJenksSampled, HeadTailBreaks, JenksCaspall,
JenksCaspallForced, JenksCaspallSampled, MaxP,
MaximumBreaks, NaturalBreaks, Quantiles, Percentiles, StdMean,
UserDefined
**classification_kwds : dict
Keyword arguments for classification scheme
For details see mapclassify documentation:
https://mapclassify.readthedocs.io/en/latest/api.html
Returns
-------
binning
Binning objects that holds the Series with values replaced with
class identifier and the bins.
"""
try:
import mapclassify.classifiers as classifiers
except ImportError:
raise ImportError(
"The 'mapclassify' >= 2.2.0 package is required to use the 'scheme' keyword"
)
from mapclassify import __version__ as mc_version
if mc_version < LooseVersion("2.2.0"):
raise ImportError(
"The 'mapclassify' >= 2.2.0 package is required to use the 'scheme' keyword"
)
schemes = {}
for classifier in classifiers.CLASSIFIERS:
schemes[classifier.lower()] = getattr(classifiers, classifier)
scheme = scheme.lower()
# mapclassify < 2.1 cleaned up the scheme names (removing underscores)
# trying both to keep compatibility with older versions and provide
# compatibility with newer versions of mapclassify
oldnew = {
"Box_Plot": "BoxPlot",
"Equal_Interval": "EqualInterval",
"Fisher_Jenks": "FisherJenks",
"Fisher_Jenks_Sampled": "FisherJenksSampled",
"HeadTail_Breaks": "HeadTailBreaks",
"Jenks_Caspall": "JenksCaspall",
"Jenks_Caspall_Forced": "JenksCaspallForced",
"Jenks_Caspall_Sampled": "JenksCaspallSampled",
"Max_P_Plassifier": "MaxP",
"Maximum_Breaks": "MaximumBreaks",
"Natural_Breaks": "NaturalBreaks",
"Std_Mean": "StdMean",
"User_Defined": "UserDefined",
}
scheme_names_mapping = {}
scheme_names_mapping.update(
{old.lower(): new.lower() for old, new in oldnew.items()}
)
scheme_names_mapping.update(
{new.lower(): old.lower() for old, new in oldnew.items()}
)
try:
scheme_class = schemes[scheme]
except KeyError:
scheme = scheme_names_mapping.get(scheme, scheme)
try:
scheme_class = schemes[scheme]
except KeyError:
raise ValueError(
"Invalid scheme. Scheme must be in the set: %r" % schemes.keys()
)
if classification_kwds["k"] is not None:
from inspect import getfullargspec as getspec
spec = getspec(scheme_class.__init__)
if "k" not in spec.args:
del classification_kwds["k"]
try:
binning = scheme_class(np.asarray(values), **classification_kwds)
except TypeError:
raise TypeError("Invalid keyword argument for %r " % scheme)
return binning
|
def _mapclassify_choro(values, scheme, **classification_kwds):
"""
Wrapper for choropleth schemes from mapclassify for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme : str
One of mapclassify classification schemes
Options are BoxPlot, EqualInterval, FisherJenks,
FisherJenksSampled, HeadTailBreaks, JenksCaspall,
JenksCaspallForced, JenksCaspallSampled, MaxP,
MaximumBreaks, NaturalBreaks, Quantiles, Percentiles, StdMean,
UserDefined
**classification_kwds : dict
Keyword arguments for classification scheme
For details see mapclassify documentation:
https://mapclassify.readthedocs.io/en/latest/api.html
Returns
-------
binning
Binning objects that holds the Series with values replaced with
class identifier and the bins.
"""
try:
import mapclassify.classifiers as classifiers
except ImportError:
raise ImportError(
"The 'mapclassify' >= 2.2.0 package is required to use the 'scheme' keyword"
)
from mapclassify import __version__ as mc_version
if mc_version < LooseVersion("2.2.0"):
raise ImportError(
"The 'mapclassify' >= 2.2.0 package is required to use the 'scheme' keyword"
)
schemes = {}
for classifier in classifiers.CLASSIFIERS:
schemes[classifier.lower()] = getattr(classifiers, classifier)
scheme = scheme.lower()
# mapclassify < 2.1 cleaned up the scheme names (removing underscores)
# trying both to keep compatibility with older versions and provide
# compatibility with newer versions of mapclassify
oldnew = {
"Box_Plot": "BoxPlot",
"Equal_Interval": "EqualInterval",
"Fisher_Jenks": "FisherJenks",
"Fisher_Jenks_Sampled": "FisherJenksSampled",
"HeadTail_Breaks": "HeadTailBreaks",
"Jenks_Caspall": "JenksCaspall",
"Jenks_Caspall_Forced": "JenksCaspallForced",
"Jenks_Caspall_Sampled": "JenksCaspallSampled",
"Max_P_Plassifier": "MaxP",
"Maximum_Breaks": "MaximumBreaks",
"Natural_Breaks": "NaturalBreaks",
"Std_Mean": "StdMean",
"User_Defined": "UserDefined",
}
scheme_names_mapping = {}
scheme_names_mapping.update(
{old.lower(): new.lower() for old, new in oldnew.items()}
)
scheme_names_mapping.update(
{new.lower(): old.lower() for old, new in oldnew.items()}
)
try:
scheme_class = schemes[scheme]
except KeyError:
scheme = scheme_names_mapping.get(scheme, scheme)
try:
scheme_class = schemes[scheme]
except KeyError:
raise ValueError(
"Invalid scheme. Scheme must be in the set: %r" % schemes.keys()
)
if classification_kwds["k"] is not None:
from inspect import getfullargspec as getspec
spec = getspec(scheme_class.__init__)
if "k" not in spec.args:
del classification_kwds["k"]
try:
binning = scheme_class(values, **classification_kwds)
except TypeError:
raise TypeError("Invalid keyword argument for %r " % scheme)
return binning
|
https://github.com/geopandas/geopandas/issues/1486
|
df = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
df.plot(column='pop_est', scheme='JenksCaspallSampled')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-3fe371919d49> in <module>
----> 1 df.plot(column='pop_est', scheme='JenksCaspallSampled')
~/Dropbox/Python/geopandas/martinfleis/geopandas/geodataframe.py in plot(self, *args, **kwargs)
897 from there.
898 """
--> 899 return plot_dataframe(self, *args, **kwargs)
900
901 plot.__doc__ = plot_dataframe.__doc__
~/Dropbox/Python/geopandas/martinfleis/geopandas/plotting.py in plot_dataframe(df, column, cmap, color, ax, cax, categorical, legend, scheme, k, vmin, vmax, markersize, figsize, legend_kwds, categories, classification_kwds, missing_kwds, aspect, **style_kwds)
672 classification_kwds["k"] = k
673
--> 674 binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds)
675 # set categorical to True for creating the legend
676 categorical = True
~/Dropbox/Python/geopandas/martinfleis/geopandas/plotting.py in _mapclassify_choro(values, scheme, **classification_kwds)
917 del classification_kwds["k"]
918 try:
--> 919 binning = scheme_class(values, **classification_kwds)
920 except TypeError:
921 raise TypeError("Invalid keyword argument for %r " % scheme)
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/mapclassify/classifiers.py in __init__(self, y, k, pct)
2037 self.name = "JenksCaspallSampled"
2038 self.y = y
-> 2039 self._summary() # have to recalculate summary stats
2040
2041 def _set_bins(self):
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/mapclassify/classifiers.py in _summary(self)
625 def _summary(self):
626 yb = self.yb
--> 627 self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
628 self.tss = self.get_tss()
629 self.adcm = self.get_adcm()
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/mapclassify/classifiers.py in <listcomp>(.0)
625 def _summary(self):
626 yb = self.yb
--> 627 self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
628 self.tss = self.get_tss()
629 self.adcm = self.get_adcm()
<__array_function__ internals> in nonzero(*args, **kwargs)
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/numpy/core/fromnumeric.py in nonzero(a)
1894
1895 """
-> 1896 return _wrapfunc(a, 'nonzero')
1897
1898
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
56 bound = getattr(obj, method, None)
57 if bound is None:
---> 58 return _wrapit(obj, method, *args, **kwds)
59
60 try:
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapit(obj, method, *args, **kwds)
49 if not isinstance(result, mu.ndarray):
50 result = asarray(result)
---> 51 result = wrap(result)
52 return result
53
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/pandas/core/generic.py in __array_wrap__(self, result, context)
1916 return result
1917 d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
-> 1918 return self._constructor(result, **d).__finalize__(self)
1919
1920 # ideally we would define this to avoid the getattr checks, but
/opt/miniconda3/envs/geo_dev/lib/python3.7/site-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
290 if len(index) != len(data):
291 raise ValueError(
--> 292 f"Length of passed values is {len(data)}, "
293 f"index implies {len(index)}."
294 )
ValueError: Length of passed values is 1, index implies 177.
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.