after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def dispatchStringRegex(self, update):
"""
Dispatches an update to all string regex handlers that match the
string.
Args:
command (str): The command keyword
update (str): The string that contains the command
"""
matching_handlers = []
for matcher in self.string_regex_handlers:
if match(matcher, update):
for handler in self.string_regex_handlers[matcher]:
matching_handlers.append(handler)
self.dispatchTo(matching_handlers, update)
|
def dispatchStringRegex(self, update):
"""
Dispatches an update to all string regex handlers that match the
string.
Args:
command (str): The command keyword
update (telegram.Update): The Telegram update that contains the
command
"""
matching_handlers = []
for matcher in self.string_regex_handlers:
if match(matcher, update):
for handler in self.string_regex_handlers[matcher]:
matching_handlers.append(handler)
self.dispatchTo(matching_handlers, update)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
def dispatchTo(self, handlers, update, **kwargs):
"""
Dispatches an update to a list of handlers.
Args:
handlers (list): A list of handler-functions.
update (any): The update to be dispatched
"""
for handler in handlers:
self.call_handler(handler, update, **kwargs)
|
def dispatchTo(self, handlers, update):
"""
Dispatches an update to a list of handlers.
Args:
handlers (list): A list of handler-functions.
update (any): The update to be dispatched
"""
for handler in handlers:
self.call_handler(handler, update)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
def call_handler(self, handler, update, **kwargs):
"""
Calls an update handler. Checks the handler for keyword arguments and
fills them, if possible.
Args:
handler (function): An update handler function
update (any): An update
"""
target_kwargs = {}
fargs = getargspec(handler).args
"""
async handlers will receive all optional arguments, since we can't
their argument list.
"""
is_async = "pargs" == getargspec(handler).varargs
if is_async or "update_queue" in fargs:
target_kwargs["update_queue"] = self.update_queue
if is_async or "args" in fargs:
if isinstance(update, Update):
args = update.message.text.split(" ")[1:]
elif isinstance(update, str):
args = update.split(" ")[1:]
else:
args = None
target_kwargs["args"] = args
if is_async or "groups" in fargs:
target_kwargs["groups"] = kwargs.get("groups", None)
if is_async or "groupdict" in fargs:
target_kwargs["groupdict"] = kwargs.get("groupdict", None)
handler(self.bot, update, **target_kwargs)
|
def call_handler(self, handler, update):
"""
Calls an update handler. Checks the handler for keyword arguments and
fills them, if possible.
Args:
handler (function): An update handler function
update (any): An update
"""
kwargs = {}
fargs = getargspec(handler).args
if "update_queue" in fargs:
kwargs["update_queue"] = self.update_queue
if "args" in fargs:
if isinstance(update, Update):
args = update.message.text.split(" ")[1:]
elif isinstance(update, str):
args = update.split(" ")[1:]
else:
args = None
kwargs["args"] = args
handler(self.bot, update, **kwargs)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
def to_dict(self):
data = {
"message_id": self.message_id,
"from": self.from_user.to_dict(),
"chat": self.chat.to_dict(),
}
try:
# Python 3.3+ supports .timestamp()
data["date"] = int(self.date.timestamp())
if self.forward_date:
data["forward_date"] = int(self.forward_date.timestamp())
except AttributeError:
# _totimestamp() for Python 3 (< 3.3) and Python 2
data["date"] = self._totimestamp(self.date)
if self.forward_date:
data["forward_date"] = self._totimestamp(self.forward_date)
if self.forward_from:
data["forward_from"] = self.forward_from.to_dict()
if self.reply_to_message:
data["reply_to_message"] = self.reply_to_message
if self.text:
data["text"] = self.text
if self.audio:
data["audio"] = self.audio.to_dict()
if self.document:
data["document"] = self.document.to_dict()
if self.photo:
data["photo"] = [p.to_dict() for p in self.photo]
if self.sticker:
data["sticker"] = self.sticker.to_dict()
if self.video:
data["video"] = self.video.to_dict()
if self.caption:
data["caption"] = self.caption
if self.contact:
data["contact"] = self.contact.to_dict()
if self.location:
data["location"] = self.location.to_dict()
if self.new_chat_participant:
data["new_chat_participant"] = self.new_chat_participant
if self.left_chat_participant:
data["left_chat_participant"] = self.left_chat_participant
if self.new_chat_title:
data["new_chat_title"] = self.new_chat_title
if self.new_chat_photo:
data["new_chat_photo"] = self.new_chat_photo
if self.delete_chat_photo:
data["delete_chat_photo"] = self.delete_chat_photo
if self.group_chat_created:
data["group_chat_created"] = self.group_chat_created
return data
|
def to_dict(self):
data = {
"message_id": self.message_id,
"from": self.from_user.to_dict(),
"chat": self.chat.to_dict(),
}
try:
# Python 3.3+ supports .timestamp()
data["date"] = int(self.date.timestamp())
if self.forward_date:
data["forward_date"] = int(self.forward_date.timestamp())
except AttributeError:
# _totimestamp() for Python 3 (< 3.3) and Python 2
data["date"] = self._totimestamp(self.date)
if self.forward_date:
data["forward_date"] = self._totimestamp(self.forward_date)
if self.forward_from:
data["forward_from"] = self.forward_from
if self.reply_to_message:
data["reply_to_message"] = self.reply_to_message
if self.text:
data["text"] = self.text
if self.audio:
data["audio"] = self.audio.to_dict()
if self.document:
data["document"] = self.document.to_dict()
if self.photo:
data["photo"] = [p.to_dict() for p in self.photo]
if self.sticker:
data["sticker"] = self.sticker.to_dict()
if self.video:
data["video"] = self.video.to_dict()
if self.caption:
data["caption"] = self.caption
if self.contact:
data["contact"] = self.contact.to_dict()
if self.location:
data["location"] = self.location.to_dict()
if self.new_chat_participant:
data["new_chat_participant"] = self.new_chat_participant
if self.left_chat_participant:
data["left_chat_participant"] = self.left_chat_participant
if self.new_chat_title:
data["new_chat_title"] = self.new_chat_title
if self.new_chat_photo:
data["new_chat_photo"] = self.new_chat_photo
if self.delete_chat_photo:
data["delete_chat_photo"] = self.delete_chat_photo
if self.group_chat_created:
data["group_chat_created"] = self.group_chat_created
return data
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/38
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/rq/worker.py", line 558, in perform_job
rv = job.perform()
File "/usr/local/lib/python2.7/dist-packages/rq/job.py", line 495, in perform
self._result = self.func(*self.args, **self.kwargs)
File "/home/joker/projects/bot/operations.py", line 50, in handle_update
handle_command(text, message)
File "/home/joker/projects/bot/operations.py", line 77, in handle_command
result = getattr(botcommands, command)(message, debug=debug)
File "/home/joker/projects/bot/config.py", line 85, in wrapper
MQTT['topic'], payload=msg.to_json(),
File "/usr/local/lib/python2.7/dist-packages/telegram/base.py", line 40, in to_json
return json.dumps(self.to_dict())
File "/usr/lib/python2.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/usr/lib/python2.7/json/encoder.py", line 201, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python2.7/json/encoder.py", line 264, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python2.7/json/encoder.py", line 178, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: <telegram.user.User object at 0xf44c50> is not JSON serializable
|
TypeError
|
def sendDocument(self, chat_id, document, reply_to_message_id=None, reply_markup=None):
"""Use this method to send general files.
Args:
chat_id:
Unique identifier for the message recipient - User or GroupChat id.
document:
File to send. You can either pass a file_id as String to resend a
file that is already on the Telegram servers, or upload a new file
using multipart/form-data.
reply_to_message_id:
If the message is a reply, ID of the original message. [Optional]
reply_markup:
Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to force a
reply from the user. [Optional]
Returns:
A telegram.Message instance representing the message posted.
"""
url = "%s/sendDocument" % self.base_url
data = {"chat_id": chat_id, "document": document}
return url, data
|
def sendDocument(self, chat_id, document, reply_to_message_id=None, reply_markup=None):
"""Use this method to send Lesser files.
Args:
chat_id:
Unique identifier for the message recipient - User or GroupChat id.
document:
File to send. You can either pass a file_id as String to resend a
file that is already on the Telegram servers, or upload a new file
using multipart/form-data.
reply_to_message_id:
If the message is a reply, ID of the original message. [Optional]
reply_markup:
Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to force a
reply from the user. [Optional]
Returns:
A telegram.Message instance representing the message posted.
"""
url = "%s/sendDocument" % self.base_url
data = {"chat_id": chat_id, "document": document}
return url, data
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/31
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/pickle.py", line 1370, in dump
Pickler(file, protocol).dump(obj)
File "/usr/lib/python2.7/pickle.py", line 224, in dump
self.save(obj)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 600, in save_list
self._batch_appends(iter(obj))
File "/usr/lib/python2.7/pickle.py", line 615, in _batch_appends
save(x)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 306, in save
rv = reduce(self.proto)
File "/home/leandrotoledo/workspace/python-telegram-bot/env/lib/python2.7/copy_reg.py", line 70, in _reduce_ex
raise TypeError, "can't pickle %s objects" % base.__name__
TypeError: can't pickle lock objects
|
TypeError
|
def sendChatAction(self, chat_id, action):
"""Use this method when you need to tell the user that something is
happening on the bot's side. The status is set for 5 seconds or less
(when a message arrives from your bot, Telegram clients clear its
typing status).
Args:
chat_id:
Unique identifier for the message recipient - User or GroupChat id.
action:
Type of action to broadcast. Choose one, depending on what the user
is about to receive:
- ChatAction.TYPING for text messages,
- ChatAction.UPLOAD_PHOTO for photos,
- ChatAction.UPLOAD_VIDEO or upload_video for videos,
- ChatAction.UPLOAD_AUDIO or upload_audio for audio files,
- ChatAction.UPLOAD_DOCUMENT for general files,
- ChatAction.FIND_LOCATION for location data.
"""
url = "%s/sendChatAction" % self.base_url
data = {"chat_id": chat_id, "action": action}
return url, data
|
def sendChatAction(self, chat_id, action):
"""Use this method when you need to tell the user that something is
happening on the bot's side. The status is set for 5 seconds or less
(when a message arrives from your bot, Telegram clients clear its
typing status).
Args:
chat_id:
Unique identifier for the message recipient - User or GroupChat id.
action:
Type of action to broadcast. Choose one, depending on what the user
is about to receive:
- ChatAction.TYPING for text messages,
- ChatAction.UPLOAD_PHOTO for photos,
- ChatAction.UPLOAD_VIDEO or upload_video for videos,
- ChatAction.UPLOAD_AUDIO or upload_audio for audio files,
- ChatAction.UPLOAD_DOCUMENT for Lesser files,
- ChatAction.FIND_LOCATION for location data.
"""
url = "%s/sendChatAction" % self.base_url
data = {"chat_id": chat_id, "action": action}
return url, data
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/31
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/pickle.py", line 1370, in dump
Pickler(file, protocol).dump(obj)
File "/usr/lib/python2.7/pickle.py", line 224, in dump
self.save(obj)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 600, in save_list
self._batch_appends(iter(obj))
File "/usr/lib/python2.7/pickle.py", line 615, in _batch_appends
save(x)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 331, in save
self.save_reduce(obj=obj, *rv)
File "/usr/lib/python2.7/pickle.py", line 419, in save_reduce
save(state)
File "/usr/lib/python2.7/pickle.py", line 286, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python2.7/pickle.py", line 649, in save_dict
self._batch_setitems(obj.iteritems())
File "/usr/lib/python2.7/pickle.py", line 663, in _batch_setitems
save(v)
File "/usr/lib/python2.7/pickle.py", line 306, in save
rv = reduce(self.proto)
File "/home/leandrotoledo/workspace/python-telegram-bot/env/lib/python2.7/copy_reg.py", line 70, in _reduce_ex
raise TypeError, "can't pickle %s objects" % base.__name__
TypeError: can't pickle lock objects
|
TypeError
|
def read(self, filenames):
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, path_types)
filename = filenames
if env.PYVERSION >= (3, 6):
filename = os.fspath(filename)
try:
with io.open(filename, encoding="utf-8") as fp:
toml_text = fp.read()
except IOError:
return []
if toml:
toml_text = substitute_variables(toml_text, os.environ)
try:
self.data = toml.loads(toml_text)
except toml.TomlDecodeError as err:
raise TomlDecodeError(*err.args)
return [filename]
else:
has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
if self.our_file or has_toml:
# Looks like they meant to read TOML, but we can't read it.
msg = "Can't read {!r} without TOML support. Install with [toml] extra"
raise CoverageException(msg.format(filename))
return []
|
def read(self, filenames):
from coverage.optional import toml
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, path_types)
filename = filenames
if env.PYVERSION >= (3, 6):
filename = os.fspath(filename)
try:
with io.open(filename, encoding="utf-8") as fp:
toml_text = fp.read()
except IOError:
return []
if toml:
toml_text = substitute_variables(toml_text, os.environ)
try:
self.data = toml.loads(toml_text)
except toml.TomlDecodeError as err:
raise TomlDecodeError(*err.args)
return [filename]
else:
has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
if self.our_file or has_toml:
# Looks like they meant to read TOML, but we can't read it.
msg = "Can't read {!r} without TOML support. Install with [toml] extra"
raise CoverageException(msg.format(filename))
return []
|
https://github.com/nedbat/coveragepy/issues/1084
|
Traceback (most recent call last):
...
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/coverage/control.py", line 195, in __init__
self.config = read_coverage_config(
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/coverage/config.py", line 530, in read_coverage_config
config_read = config.from_file(fname, our_file=our_file)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/coverage/config.py", line 275, in from_file
files_read = cp.read(filename)
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/coverage/tomlconfig.py", line 32, in read
from coverage.optional import toml
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/coverage/optional.py", line 51, in <module>
import toml
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/toml/__init__.py", line 6, in <module>
from toml import encoder
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/toml/encoder.py", line 4, in <module>
from decimal import Decimal
File "/opt/local/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/decimal.py", line 3, in <module>
from _decimal import *
KeyError: 'numbers'
|
KeyError
|
def execute(self, sql, parameters=()):
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug:
tail = " with {!r}".format(parameters) if parameters else ""
self.debug.write("Executing {!r}{}".format(sql, tail))
try:
try:
return self.con.execute(sql, parameters)
except Exception:
# In some cases, an error might happen that isn't really an
# error. Try again immediately.
# https://github.com/nedbat/coveragepy/issues/1010
return self.con.execute(sql, parameters)
except sqlite3.Error as exc:
msg = str(exc)
try:
# `execute` is the first thing we do with the database, so try
# hard to provide useful hints if something goes wrong now.
with open(self.filename, "rb") as bad_file:
cov4_sig = b"!coverage.py: This is a private format"
if bad_file.read(len(cov4_sig)) == cov4_sig:
msg = (
"Looks like a coverage 4.x data file. "
"Are you mixing versions of coverage?"
)
except Exception:
pass
if self.debug:
self.debug.write("EXCEPTION from execute: {}".format(msg))
raise CoverageException(
"Couldn't use data file {!r}: {}".format(self.filename, msg)
)
|
def execute(self, sql, parameters=()):
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug:
tail = " with {!r}".format(parameters) if parameters else ""
self.debug.write("Executing {!r}{}".format(sql, tail))
try:
return self.con.execute(sql, parameters)
except sqlite3.Error as exc:
msg = str(exc)
try:
# `execute` is the first thing we do with the database, so try
# hard to provide useful hints if something goes wrong now.
with open(self.filename, "rb") as bad_file:
cov4_sig = b"!coverage.py: This is a private format"
if bad_file.read(len(cov4_sig)) == cov4_sig:
msg = (
"Looks like a coverage 4.x data file. "
"Are you mixing versions of coverage?"
)
except Exception:
pass
if self.debug:
self.debug.write("EXCEPTION from execute: {}".format(msg))
raise CoverageException(
"Couldn't use data file {!r}: {}".format(self.filename, msg)
)
|
https://github.com/nedbat/coveragepy/issues/1010
|
Traceback (most recent call last):
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/bin/coverage", line 8, in <module>
sys.exit(main())
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/cmdline.py", line 865, in main
status = CoverageScript().command_line(argv)
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/cmdline.py", line 582, in command_line
return self.do_run(options, args)
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/cmdline.py", line 747, in do_run
self.coverage.save()
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/control.py", line 651, in save
data = self.get_data()
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/control.py", line 705, in get_data
if self._collector and self._collector.flush_data():
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/collector.py", line 423, in flush_data
self.covdata.add_arcs(self.mapped_file_dict(self.data))
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/sqldata.py", line 480, in add_arcs
data,
File "/home/runner/work/pytest/pytest/.tox/pypy3-coverage/site-packages/coverage/sqldata.py", line 1089, in executemany
return self.con.executemany(sql, data)
File "/opt/hostedtoolcache/PyPy/3.6.9/x64/lib_pypy/_sqlite3.py", line 423, in executemany
return cur.executemany(*args)
File "/opt/hostedtoolcache/PyPy/3.6.9/x64/lib_pypy/_sqlite3.py", line 773, in wrapper
return func(self, *args, **kwargs)
File "/opt/hostedtoolcache/PyPy/3.6.9/x64/lib_pypy/_sqlite3.py", line 938, in executemany
return self.__execute(True, sql, many_params)
File "/opt/hostedtoolcache/PyPy/3.6.9/x64/lib_pypy/_sqlite3.py", line 889, in __execute
self.__statement._set_params(params)
File "/opt/hostedtoolcache/PyPy/3.6.9/x64/lib_pypy/_sqlite3.py", line 1180, in _set_params
"probably unsupported type." % i)
_sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
|
_sqlite3.InterfaceError
|
def prepare(self):
"""Set sys.path properly.
This needs to happen before any importing, and without importing anything.
"""
if self.as_module:
if env.PYBEHAVIOR.actual_syspath0_dash_m:
path0 = os.getcwd()
else:
path0 = ""
elif os.path.isdir(self.arg0):
# Running a directory means running the __main__.py file in that
# directory.
path0 = self.arg0
else:
path0 = os.path.abspath(os.path.dirname(self.arg0))
if os.path.isdir(sys.path[0]):
# sys.path fakery. If we are being run as a command, then sys.path[0]
# is the directory of the "coverage" script. If this is so, replace
# sys.path[0] with the directory of the file we're running, or the
# current directory when running modules. If it isn't so, then we
# don't know what's going on, and just leave it alone.
top_file = inspect.stack()[-1][0].f_code.co_filename
sys_path_0_abs = os.path.abspath(sys.path[0])
top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
sys_path_0_abs = canonical_filename(sys_path_0_abs)
top_file_dir_abs = canonical_filename(top_file_dir_abs)
if sys_path_0_abs != top_file_dir_abs:
path0 = None
else:
# sys.path[0] is a file. Is the next entry the directory containing
# that file?
if sys.path[1] == os.path.dirname(sys.path[0]):
# Can it be right to always remove that?
del sys.path[1]
if path0 is not None:
sys.path[0] = python_reported_file(path0)
|
def prepare(self):
"""Set sys.path properly.
This needs to happen before any importing, and without importing anything.
"""
should_update_sys_path = True
if self.as_module:
if env.PYBEHAVIOR.actual_syspath0_dash_m:
path0 = os.getcwd()
else:
path0 = ""
sys.path[0] = path0
should_update_sys_path = False
elif os.path.isdir(self.arg0):
# Running a directory means running the __main__.py file in that
# directory.
path0 = self.arg0
else:
path0 = os.path.abspath(os.path.dirname(self.arg0))
if should_update_sys_path:
# sys.path fakery. If we are being run as a command, then sys.path[0]
# is the directory of the "coverage" script. If this is so, replace
# sys.path[0] with the directory of the file we're running, or the
# current directory when running modules. If it isn't so, then we
# don't know what's going on, and just leave it alone.
top_file = inspect.stack()[-1][0].f_code.co_filename
if os.path.abspath(sys.path[0]) == os.path.abspath(os.path.dirname(top_file)):
# Set sys.path correctly.
sys.path[0] = python_reported_file(path0)
|
https://github.com/nedbat/coveragepy/issues/862
|
Traceback (most recent call last):
File "manage.py", line 21, in <module>
main()
File "manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\commands\test.py", line 23, in run_from_argv
super().run_from_argv(argv)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\base.py", line 320, in run_from_argv
parser = self.create_parser(argv[0], argv[1])
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\base.py", line 294, in create_parser
self.add_arguments(parser)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\commands\test.py", line 44, in add_arguments
test_runner_class = get_runner(settings, self.test_runner)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\test\utils.py", line 301, in get_runner
test_runner_class = test_runner_class or settings.TEST_RUNNER
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\conf\__init__.py", line 76, in __getattr__
self._setup(name)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\conf\__init__.py", line 63, in _setup
self._wrapped = Settings(settings_module)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\conf\__init__.py", line 142, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\importlib\__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 936, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 936, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 948, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'newproject'
|
ModuleNotFoundError
|
def do_zip_mods():
"""Build the zipmods.zip file."""
zf = zipfile.ZipFile("tests/zipmods.zip", "w")
# Take one file from disk.
zf.write("tests/covmodzip1.py", "covmodzip1.py")
# The others will be various encodings.
source = textwrap.dedent("""\
# coding: {encoding}
text = u"{text}"
ords = {ords}
assert [ord(c) for c in text] == ords
print(u"All OK with {encoding}")
""")
# These encodings should match the list in tests/test_python.py
details = [
("utf8", "ⓗⓔⓛⓛⓞ, ⓦⓞⓡⓛⓓ"),
("gb2312", "你好,世界"),
("hebrew", "שלום, עולם"),
("shift_jis", "こんにちは世界"),
("cp1252", "“hi”"),
]
for encoding, text in details:
filename = "encoded_{}.py".format(encoding)
ords = [ord(c) for c in text]
source_text = source.format(encoding=encoding, text=text, ords=ords)
zf.writestr(filename, source_text.encode(encoding))
zf.close()
zf = zipfile.ZipFile("tests/covmain.zip", "w")
zf.write("coverage/__main__.py", "__main__.py")
zf.close()
|
def do_zip_mods():
"""Build the zipmods.zip file."""
zf = zipfile.ZipFile("tests/zipmods.zip", "w")
# Take one file from disk.
zf.write("tests/covmodzip1.py", "covmodzip1.py")
# The others will be various encodings.
source = textwrap.dedent("""\
# coding: {encoding}
text = u"{text}"
ords = {ords}
assert [ord(c) for c in text] == ords
print(u"All OK with {encoding}")
""")
# These encodings should match the list in tests/test_python.py
details = [
("utf8", "ⓗⓔⓛⓛⓞ, ⓦⓞⓡⓛⓓ"),
("gb2312", "你好,世界"),
("hebrew", "שלום, עולם"),
("shift_jis", "こんにちは世界"),
("cp1252", "“hi”"),
]
for encoding, text in details:
filename = "encoded_{}.py".format(encoding)
ords = [ord(c) for c in text]
source_text = source.format(encoding=encoding, text=text, ords=ords)
zf.writestr(filename, source_text.encode(encoding))
zf.close()
|
https://github.com/nedbat/coveragepy/issues/862
|
Traceback (most recent call last):
File "manage.py", line 21, in <module>
main()
File "manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\commands\test.py", line 23, in run_from_argv
super().run_from_argv(argv)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\base.py", line 320, in run_from_argv
parser = self.create_parser(argv[0], argv[1])
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\base.py", line 294, in create_parser
self.add_arguments(parser)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\core\management\commands\test.py", line 44, in add_arguments
test_runner_class = get_runner(settings, self.test_runner)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\test\utils.py", line 301, in get_runner
test_runner_class = test_runner_class or settings.TEST_RUNNER
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\conf\__init__.py", line 76, in __getattr__
self._setup(name)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\conf\__init__.py", line 63, in _setup
self._wrapped = Settings(settings_module)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\site-packages\django\conf\__init__.py", line 142, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "c:\users\spaceofmiah\.virtualenvs\backend--o8je5ax\lib\importlib\__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 936, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 936, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 978, in _gcd_import
File "<frozen importlib._bootstrap>", line 961, in _find_and_load
File "<frozen importlib._bootstrap>", line 948, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'newproject'
|
ModuleNotFoundError
|
def sys_info(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
self._init()
self._post_init()
def plugin_info(plugins):
"""Make an entry for the sys_info from a list of plug-ins."""
entries = []
for plugin in plugins:
entry = plugin._coverage_plugin_name
if not plugin._coverage_enabled:
entry += " (disabled)"
entries.append(entry)
return entries
info = [
("version", covmod.__version__),
("coverage", covmod.__file__),
("tracer", self._collector.tracer_name() if self._collector else "-none-"),
("CTracer", "available" if CTracer else "unavailable"),
("plugins.file_tracers", plugin_info(self._plugins.file_tracers)),
("plugins.configurers", plugin_info(self._plugins.configurers)),
("plugins.context_switchers", plugin_info(self._plugins.context_switchers)),
("configs_attempted", self.config.attempted_config_files),
("configs_read", self.config.config_files_read),
("config_file", self.config.config_file),
(
"config_contents",
repr(self.config._config_contents)
if self.config._config_contents
else "-none-",
),
(
"data_file",
self._data.data_filename() if self._data is not None else "-none-",
),
("python", sys.version.replace("\n", "")),
("platform", platform.platform()),
("implementation", platform.python_implementation()),
("executable", sys.executable),
("def_encoding", sys.getdefaultencoding()),
("fs_encoding", sys.getfilesystemencoding()),
("pid", os.getpid()),
("cwd", os.getcwd()),
("path", sys.path),
(
"environment",
sorted(
("%s = %s" % (k, v))
for k, v in iitems(os.environ)
if any(slug in k for slug in ("COV", "PY"))
),
),
("command_line", " ".join(getattr(sys, "argv", ["-none-"]))),
]
if self._inorout:
info.extend(self._inorout.sys_info())
info.extend(CoverageData.sys_info())
return info
|
def sys_info(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
self._init()
self._post_init()
def plugin_info(plugins):
"""Make an entry for the sys_info from a list of plug-ins."""
entries = []
for plugin in plugins:
entry = plugin._coverage_plugin_name
if not plugin._coverage_enabled:
entry += " (disabled)"
entries.append(entry)
return entries
info = [
("version", covmod.__version__),
("coverage", covmod.__file__),
("tracer", self._collector.tracer_name() if self._collector else "-none-"),
("CTracer", "available" if CTracer else "unavailable"),
("plugins.file_tracers", plugin_info(self._plugins.file_tracers)),
("plugins.configurers", plugin_info(self._plugins.configurers)),
("plugins.context_switchers", plugin_info(self._plugins.context_switchers)),
("configs_attempted", self.config.attempted_config_files),
("configs_read", self.config.config_files_read),
("config_file", self.config.config_file),
(
"config_contents",
repr(self.config._config_contents)
if self.config._config_contents
else "-none-",
),
("data_file", self._data.filename if self._data else "-none-"),
("python", sys.version.replace("\n", "")),
("platform", platform.platform()),
("implementation", platform.python_implementation()),
("executable", sys.executable),
("def_encoding", sys.getdefaultencoding()),
("fs_encoding", sys.getfilesystemencoding()),
("pid", os.getpid()),
("cwd", os.getcwd()),
("path", sys.path),
(
"environment",
sorted(
("%s = %s" % (k, v))
for k, v in iitems(os.environ)
if any(slug in k for slug in ("COV", "PY"))
),
),
("command_line", " ".join(getattr(sys, "argv", ["-none-"]))),
]
if self._inorout:
info.extend(self._inorout.sys_info())
info.extend(CoverageData.sys_info())
return info
|
https://github.com/nedbat/coveragepy/issues/907
|
$ coverage run --debug=sys foo.py
Traceback (most recent call last):
File ".tox/py36/bin/coverage", line 11, in <module>
load_entry_point('coverage', 'console_scripts', 'coverage')()
File "/Users/ned/coverage/trunk/coverage/cmdline.py", line 824, in main
status = CoverageScript().command_line(argv)
File "/Users/ned/coverage/trunk/coverage/cmdline.py", line 555, in command_line
return self.do_run(options, args)
File "/Users/ned/coverage/trunk/coverage/cmdline.py", line 700, in do_run
self.coverage.start()
File "/Users/ned/coverage/trunk/coverage/control.py", line 516, in start
self._post_init()
File "/Users/ned/coverage/trunk/coverage/control.py", line 282, in _post_init
self._write_startup_debug()
File "/Users/ned/coverage/trunk/coverage/control.py", line 300, in _write_startup_debug
write_formatted_info(self._debug, "sys", self.sys_info())
File "/Users/ned/coverage/trunk/coverage/control.py", line 1008, in sys_info
('data_file', self._data.filename if self._data else "-none-"),
AttributeError: 'CoverageData' object has no attribute 'filename'
|
AttributeError
|
def from_file(self, filename, our_file):
"""Read configuration from a .rc file.
`filename` is a file name to read.
`our_file` is True if this config file is specifically for coverage,
False if we are examining another config file (tox.ini, setup.cfg)
for possible settings.
Returns True or False, whether the file could be read, and it had some
coverage.py settings in it.
"""
_, ext = os.path.splitext(filename)
if ext == ".toml":
cp = TomlConfigParser(our_file)
else:
cp = HandyConfigParser(our_file)
self.attempted_config_files.append(filename)
try:
files_read = cp.read(filename)
except (configparser.Error, TomlDecodeError) as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
if not files_read:
return False
self.config_files_read.extend(map(os.path.abspath, files_read))
any_set = False
try:
for option_spec in self.CONFIG_FILE_OPTIONS:
was_set = self._set_attr_from_config_option(cp, *option_spec)
if was_set:
any_set = True
except ValueError as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
# Check that there are no unrecognized options.
all_options = collections.defaultdict(set)
for option_spec in self.CONFIG_FILE_OPTIONS:
section, option = option_spec[1].split(":")
all_options[section].add(option)
for section, options in iitems(all_options):
real_section = cp.has_section(section)
if real_section:
for unknown in set(cp.options(section)) - options:
raise CoverageException(
"Unrecognized option '[%s] %s=' in config file %s"
% (real_section, unknown, filename)
)
# [paths] is special
if cp.has_section("paths"):
for option in cp.options("paths"):
self.paths[option] = cp.getlist("paths", option)
any_set = True
# plugins can have options
for plugin in self.plugins:
if cp.has_section(plugin):
self.plugin_options[plugin] = cp.get_section(plugin)
any_set = True
# Was this file used as a config file? If it's specifically our file,
# then it was used. If we're piggybacking on someone else's file,
# then it was only used if we found some settings in it.
if our_file:
used = True
else:
used = any_set
if used:
self.config_file = os.path.abspath(filename)
with open(filename) as f:
self._config_contents = f.read()
return used
|
def from_file(self, filename, our_file):
"""Read configuration from a .rc file.
`filename` is a file name to read.
`our_file` is True if this config file is specifically for coverage,
False if we are examining another config file (tox.ini, setup.cfg)
for possible settings.
Returns True or False, whether the file could be read, and it had some
coverage.py settings in it.
"""
_, ext = os.path.splitext(filename)
if ext == ".toml":
cp = TomlConfigParser(our_file)
else:
cp = HandyConfigParser(our_file)
self.attempted_config_files.append(filename)
try:
files_read = cp.read(filename)
except (configparser.Error, TomlDecodeError) as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
if not files_read:
return False
self.config_files_read.extend(files_read)
any_set = False
try:
for option_spec in self.CONFIG_FILE_OPTIONS:
was_set = self._set_attr_from_config_option(cp, *option_spec)
if was_set:
any_set = True
except ValueError as err:
raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
# Check that there are no unrecognized options.
all_options = collections.defaultdict(set)
for option_spec in self.CONFIG_FILE_OPTIONS:
section, option = option_spec[1].split(":")
all_options[section].add(option)
for section, options in iitems(all_options):
real_section = cp.has_section(section)
if real_section:
for unknown in set(cp.options(section)) - options:
raise CoverageException(
"Unrecognized option '[%s] %s=' in config file %s"
% (real_section, unknown, filename)
)
# [paths] is special
if cp.has_section("paths"):
for option in cp.options("paths"):
self.paths[option] = cp.getlist("paths", option)
any_set = True
# plugins can have options
for plugin in self.plugins:
if cp.has_section(plugin):
self.plugin_options[plugin] = cp.get_section(plugin)
any_set = True
# Was this file used as a config file? If it's specifically our file,
# then it was used. If we're piggybacking on someone else's file,
# then it was only used if we found some settings in it.
if our_file:
used = True
else:
used = any_set
if used:
self.config_file = filename
with open(filename) as f:
self._config_contents = f.read()
return used
|
https://github.com/nedbat/coveragepy/issues/890
|
Traceback (most recent call last):
File "repro.py", line 7, in <module>
with multiprocessing.Manager() as manager:
File "/usr/lib/python3.8/multiprocessing/context.py", line 57, in Manager
m.start()
File "/usr/lib/python3.8/multiprocessing/managers.py", line 583, in start
self._address = reader.recv()
File "/usr/lib/python3.8/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/usr/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 383, in _recv
raise EOFError
EOFError
|
EOFError
|
def patch_multiprocessing(rcfile):
"""Monkey-patch the multiprocessing module.
This enables coverage measurement of processes started by multiprocessing.
This involves aggressive monkey-patching.
`rcfile` is the path to the rcfile being used.
"""
if hasattr(multiprocessing, PATCHED_MARKER):
return
if env.PYVERSION >= (3, 4):
OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
else:
multiprocessing.Process = ProcessWithCoverage
# Set the value in ProcessWithCoverage that will be pickled into the child
# process.
os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)
# When spawning processes rather than forking them, we have no state in the
# new process. We sneak in there with a Stowaway: we stuff one of our own
# objects into the data that gets pickled and sent to the sub-process. When
# the Stowaway is unpickled, it's __setstate__ method is called, which
# re-applies the monkey-patch.
# Windows only spawns, so this is needed to keep Windows working.
try:
from multiprocessing import spawn
original_get_preparation_data = spawn.get_preparation_data
except (ImportError, AttributeError):
pass
else:
def get_preparation_data_with_stowaway(name):
"""Get the original preparation data, and also insert our stowaway."""
d = original_get_preparation_data(name)
d["stowaway"] = Stowaway(rcfile)
return d
spawn.get_preparation_data = get_preparation_data_with_stowaway
setattr(multiprocessing, PATCHED_MARKER, True)
|
def patch_multiprocessing(rcfile):
"""Monkey-patch the multiprocessing module.
This enables coverage measurement of processes started by multiprocessing.
This involves aggressive monkey-patching.
`rcfile` is the path to the rcfile being used.
"""
if hasattr(multiprocessing, PATCHED_MARKER):
return
if env.PYVERSION >= (3, 4):
OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
else:
multiprocessing.Process = ProcessWithCoverage
# Set the value in ProcessWithCoverage that will be pickled into the child
# process.
os.environ["COVERAGE_RCFILE"] = rcfile
# When spawning processes rather than forking them, we have no state in the
# new process. We sneak in there with a Stowaway: we stuff one of our own
# objects into the data that gets pickled and sent to the sub-process. When
# the Stowaway is unpickled, it's __setstate__ method is called, which
# re-applies the monkey-patch.
# Windows only spawns, so this is needed to keep Windows working.
try:
from multiprocessing import spawn
original_get_preparation_data = spawn.get_preparation_data
except (ImportError, AttributeError):
pass
else:
def get_preparation_data_with_stowaway(name):
"""Get the original preparation data, and also insert our stowaway."""
d = original_get_preparation_data(name)
d["stowaway"] = Stowaway(rcfile)
return d
spawn.get_preparation_data = get_preparation_data_with_stowaway
setattr(multiprocessing, PATCHED_MARKER, True)
|
https://github.com/nedbat/coveragepy/issues/890
|
Traceback (most recent call last):
File "repro.py", line 7, in <module>
with multiprocessing.Manager() as manager:
File "/usr/lib/python3.8/multiprocessing/context.py", line 57, in Manager
m.start()
File "/usr/lib/python3.8/multiprocessing/managers.py", line 583, in start
self._address = reader.recv()
File "/usr/lib/python3.8/multiprocessing/connection.py", line 250, in recv
buf = self._recv_bytes()
File "/usr/lib/python3.8/multiprocessing/connection.py", line 414, in _recv_bytes
buf = self._recv(4)
File "/usr/lib/python3.8/multiprocessing/connection.py", line 383, in _recv
raise EOFError
EOFError
|
EOFError
|
def execute(self, sql, parameters=()):
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug:
tail = " with {!r}".format(parameters) if parameters else ""
self.debug.write("Executing {!r}{}".format(sql, tail))
try:
return self.con.execute(sql, parameters)
except sqlite3.Error as exc:
msg = str(exc)
try:
# `execute` is the first thing we do with the database, so try
# hard to provide useful hints if something goes wrong now.
with open(self.filename, "rb") as bad_file:
cov4_sig = b"!coverage.py: This is a private format"
if bad_file.read(len(cov4_sig)) == cov4_sig:
msg = (
"Looks like a coverage 4.x data file. "
"Are you mixing versions of coverage?"
)
except Exception:
pass
raise CoverageException(
"Couldn't use data file {!r}: {}".format(self.filename, msg)
)
|
def execute(self, sql, parameters=()):
"""Same as :meth:`python:sqlite3.Connection.execute`."""
if self.debug:
tail = " with {!r}".format(parameters) if parameters else ""
self.debug.write("Executing {!r}{}".format(sql, tail))
try:
return self.con.execute(sql, parameters)
except sqlite3.Error as exc:
raise CoverageException(
"Couldn't use data file {!r}: {}".format(self.filename, exc)
)
|
https://github.com/nedbat/coveragepy/issues/886
|
Traceback (most recent call last):
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 1025, in execute
return self.con.execute(sql, parameters)
sqlite3.DatabaseError: file is not a database
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/bin/pytest", line 8, in <module>
sys.exit(main())
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/_pytest/config/__init__.py", line 72, in main
config = _prepareconfig(args, plugins)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/_pytest/config/__init__.py", line 223, in _prepareconfig
pluginmanager=pluginmanager, args=args
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/hooks.py", line 286, in __call__
return self._hookexec(self, self.get_hookimpls(), kwargs)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/manager.py", line 93, in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/manager.py", line 87, in <lambda>
firstresult=hook.spec.opts.get("firstresult") if hook.spec else False,
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/callers.py", line 203, in _multicall
gen.send(outcome)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/_pytest/helpconfig.py", line 89, in pytest_cmdline_parse
config = outcome.get_result()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/callers.py", line 80, in get_result
raise ex[1].with_traceback(ex[2])
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/callers.py", line 187, in _multicall
res = hook_impl.function(*args)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/_pytest/config/__init__.py", line 793, in pytest_cmdline_parse
self.parse(args)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/_pytest/config/__init__.py", line 999, in parse
self._preparse(args, addopts=addopts)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/_pytest/config/__init__.py", line 957, in _preparse
early_config=self, args=args, parser=self._parser
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/hooks.py", line 286, in __call__
return self._hookexec(self, self.get_hookimpls(), kwargs)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/manager.py", line 93, in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/manager.py", line 87, in <lambda>
firstresult=hook.spec.opts.get("firstresult") if hook.spec else False,
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/callers.py", line 208, in _multicall
return outcome.get_result()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/callers.py", line 80, in get_result
raise ex[1].with_traceback(ex[2])
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pluggy/callers.py", line 187, in _multicall
res = hook_impl.function(*args)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pytest_cov/plugin.py", line 118, in pytest_load_initial_conftests
plugin = CovPlugin(early_config.known_args_namespace, early_config.pluginmanager)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pytest_cov/plugin.py", line 164, in __init__
self.start(engine.Central)
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pytest_cov/plugin.py", line 186, in start
self.cov_controller.start()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/pytest_cov/engine.py", line 187, in start
self.cov.load()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/control.py", line 387, in load
self._data.read()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 741, in read
with self._connect(): # TODO: doesn't look right
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 296, in _connect
self._open_db()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 264, in _open_db
self._read_db()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 268, in _read_db
with self._dbs[get_thread_id()] as db:
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 1008, in __enter__
self._connect()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 996, in _connect
self.execute("pragma journal_mode=off").close()
File "/Users/ellbosch/Repos/mssql-cli/.tox/py37/lib/python3.7/site-packages/coverage/sqldata.py", line 1027, in execute
raise CoverageException("Couldn't use data file {!r}: {}".format(self.filename, exc))
coverage.misc.CoverageException: Couldn't use data file '/Users/ellbosch/Repos/mssql-cli/.coverage': file is not a database
Command '['pytest', '-l', '--cov', 'mssqlcli', '--doctest-modules', '--junitxml=junit/test-6d62ae30-2031-11ea-80ec-3af9d39e6caa-results.xml', '--cov-report=xml', '--cov-report=html', '--cov-append', '-o', 'junit_suite_name=pytest-3.7.4', '-s', '-m', 'not unstable', 'tests/jsonrpc']' returned non-zero exit status 1.
ERROR: InvocationError for command /Users/ellbosch/Repos/mssql-cli/.tox/py37/bin/python build.py unit_test (exited with code 1)
|
sqlite3.DatabaseError
|
def _connect(self):
"""Connect to the db and do universal initialization."""
if self.con is not None:
return
# SQLite on Windows on py2 won't open a file if the filename argument
# has non-ascii characters in it. Opening a relative file name avoids
# a problem if the current directory has non-ascii.
try:
filename = os.path.relpath(self.filename)
except ValueError:
# ValueError can be raised under Windows when os.getcwd() returns a
# folder from a different drive than the drive of self.filename in
# which case we keep the original value of self.filename unchanged,
# hoping that we won't face the non-ascii directory problem.
filename = self.filename
# It can happen that Python switches threads while the tracer writes
# data. The second thread will also try to write to the data,
# effectively causing a nested context. However, given the idempotent
# nature of the tracer operations, sharing a connection among threads
# is not a problem.
if self.debug:
self.debug.write("Connecting to {!r}".format(self.filename))
self.con = sqlite3.connect(filename, check_same_thread=False)
self.con.create_function("REGEXP", 2, _regexp)
# This pragma makes writing faster. It disables rollbacks, but we never need them.
# PyPy needs the .close() calls here, or sqlite gets twisted up:
# https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
self.execute("pragma journal_mode=off").close()
# This pragma makes writing faster.
self.execute("pragma synchronous=off").close()
|
def _connect(self):
"""Connect to the db and do universal initialization."""
if self.con is not None:
return
# SQLite on Windows on py2 won't open a file if the filename argument
# has non-ascii characters in it. Opening a relative file name avoids
# a problem if the current directory has non-ascii.
filename = os.path.relpath(self.filename)
# It can happen that Python switches threads while the tracer writes
# data. The second thread will also try to write to the data,
# effectively causing a nested context. However, given the idempotent
# nature of the tracer operations, sharing a connection among threads
# is not a problem.
if self.debug:
self.debug.write("Connecting to {!r}".format(self.filename))
self.con = sqlite3.connect(filename, check_same_thread=False)
self.con.create_function("REGEXP", 2, _regexp)
# This pragma makes writing faster. It disables rollbacks, but we never need them.
# PyPy needs the .close() calls here, or sqlite gets twisted up:
# https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
self.execute("pragma journal_mode=off").close()
# This pragma makes writing faster.
self.execute("pragma synchronous=off").close()
|
https://github.com/nedbat/coveragepy/issues/895
|
E AssertionError: Error in atexit._run_exitfuncs:
E Traceback (most recent call last):
E File "c:\miniconda\envs\testvenv\lib\ntpath.py", line 562, in relpath
E path_drive, start_drive))
E ValueError: path is on mount 'D:', start on mount 'c:'
cmd = ['c:\\miniconda\\envs\\testvenv\\python.exe', 'C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\tmp32ajt78f_src_test_sklearn.py']
coverage_rc = None
cwd = 'c:\\miniconda\\envs\\testvenv\\lib\\site-packages'
...
|
AssertionError
|
def __init__(self, args, as_module=False):
self.args = args
self.as_module = as_module
self.arg0 = args[0]
self.package = self.modulename = self.pathname = self.loader = self.spec = None
|
def __init__(self, args, as_module=False):
self.args = args
self.as_module = as_module
self.arg0 = args[0]
self.package = self.modulename = self.pathname = None
|
https://github.com/nedbat/coveragepy/issues/838
|
$ python --version
Python 3.5.6 :: Anaconda, Inc.
$ coverage --version
Coverage.py, version 4.5.1 with C extension
Documentation at https://coverage.readthedocs.io
$ cat foo.py
print(__name__)
print(__spec__.name)
$ python -m foo # Behaves as expected
__main__
foo
$ coverage run -m foo # Observe __spec__ is None
__main__
Traceback (most recent call last):
File "/nas/dft/ire/rhys/Build/liquidmetal-debt/foo.py", line 2, in <module>
print(__spec__.name)
AttributeError: 'NoneType' object has no attribute 'name'
Coverage.py warning: No data was collected. (no-data-collected)
|
AttributeError
|
def prepare(self):
"""Do initial preparation to run Python code.
Includes finding the module to run, adjusting sys.argv[0], and changing
sys.path to match what Python does.
"""
should_update_sys_path = True
if self.as_module:
if env.PYBEHAVIOR.actual_syspath0_dash_m:
path0 = os.getcwd()
else:
path0 = ""
sys.path[0] = path0
should_update_sys_path = False
self.modulename = self.arg0
pathname, self.package, self.spec = find_module(self.modulename)
if self.spec is not None:
self.modulename = self.spec.name
self.loader = DummyLoader(self.modulename)
self.pathname = os.path.abspath(pathname)
self.args[0] = self.arg0 = self.pathname
elif os.path.isdir(self.arg0):
# Running a directory means running the __main__.py file in that
# directory.
path0 = self.arg0
for ext in [".py", ".pyc", ".pyo"]:
try_filename = os.path.join(self.arg0, "__main__" + ext)
if os.path.exists(try_filename):
self.arg0 = try_filename
break
else:
raise NoSource("Can't find '__main__' module in '%s'" % self.arg0)
if env.PY2:
self.arg0 = os.path.abspath(self.arg0)
# Make a spec. I don't know if this is the right way to do it.
try:
import importlib.machinery
except ImportError:
pass
else:
self.spec = importlib.machinery.ModuleSpec(
"__main__", None, origin=try_filename
)
self.spec.has_location = True
self.package = ""
self.loader = DummyLoader("__main__")
else:
path0 = os.path.abspath(os.path.dirname(self.arg0))
if env.PY3:
self.loader = DummyLoader("__main__")
if self.modulename is None:
self.modulename = "__main__"
if should_update_sys_path:
# sys.path fakery. If we are being run as a command, then sys.path[0]
# is the directory of the "coverage" script. If this is so, replace
# sys.path[0] with the directory of the file we're running, or the
# current directory when running modules. If it isn't so, then we
# don't know what's going on, and just leave it alone.
top_file = inspect.stack()[-1][0].f_code.co_filename
if os.path.abspath(sys.path[0]) == os.path.abspath(os.path.dirname(top_file)):
# Set sys.path correctly.
sys.path[0] = path0
|
def prepare(self):
"""Do initial preparation to run Python code.
Includes finding the module to run, adjusting sys.argv[0], and changing
sys.path to match what Python does.
"""
should_update_sys_path = True
if self.as_module:
if env.PYBEHAVIOR.actual_syspath0_dash_m:
path0 = os.getcwd()
else:
path0 = ""
sys.path[0] = path0
should_update_sys_path = False
self.modulename = self.arg0
pathname, self.package = find_module(self.modulename)
self.pathname = os.path.abspath(pathname)
self.args[0] = self.arg0 = self.pathname
elif os.path.isdir(self.arg0):
# Running a directory means running the __main__.py file in that
# directory.
path0 = self.arg0
for ext in [".py", ".pyc", ".pyo"]:
try_filename = os.path.join(self.arg0, "__main__" + ext)
if os.path.exists(try_filename):
self.arg0 = try_filename
break
else:
raise NoSource("Can't find '__main__' module in '%s'" % self.arg0)
else:
path0 = os.path.abspath(os.path.dirname(self.arg0))
if self.modulename is None and env.PYVERSION >= (3, 3):
self.modulename = "__main__"
if should_update_sys_path:
# sys.path fakery. If we are being run as a command, then sys.path[0]
# is the directory of the "coverage" script. If this is so, replace
# sys.path[0] with the directory of the file we're running, or the
# current directory when running modules. If it isn't so, then we
# don't know what's going on, and just leave it alone.
top_file = inspect.stack()[-1][0].f_code.co_filename
if os.path.abspath(sys.path[0]) == os.path.abspath(os.path.dirname(top_file)):
# Set sys.path correctly.
sys.path[0] = path0
|
https://github.com/nedbat/coveragepy/issues/838
|
$ python --version
Python 3.5.6 :: Anaconda, Inc.
$ coverage --version
Coverage.py, version 4.5.1 with C extension
Documentation at https://coverage.readthedocs.io
$ cat foo.py
print(__name__)
print(__spec__.name)
$ python -m foo # Behaves as expected
__main__
foo
$ coverage run -m foo # Observe __spec__ is None
__main__
Traceback (most recent call last):
File "/nas/dft/ire/rhys/Build/liquidmetal-debt/foo.py", line 2, in <module>
print(__spec__.name)
AttributeError: 'NoneType' object has no attribute 'name'
Coverage.py warning: No data was collected. (no-data-collected)
|
AttributeError
|
def run(self):
"""Run the Python code!"""
# Create a module to serve as __main__
main_mod = types.ModuleType("__main__")
from_pyc = self.arg0.endswith((".pyc", ".pyo"))
main_mod.__file__ = self.arg0
if from_pyc:
main_mod.__file__ = main_mod.__file__[:-1]
if self.package is not None:
main_mod.__package__ = self.package
main_mod.__loader__ = self.loader
if self.spec is not None:
main_mod.__spec__ = self.spec
main_mod.__builtins__ = BUILTINS
sys.modules["__main__"] = main_mod
# Set sys.argv properly.
sys.argv = self.args
try:
# Make a code object somehow.
if from_pyc:
code = make_code_from_pyc(self.arg0)
else:
code = make_code_from_py(self.arg0)
except CoverageException:
raise
except Exception as exc:
msg = (
"Couldn't run {filename!r} as Python code: {exc.__class__.__name__}: {exc}"
)
raise CoverageException(msg.format(filename=self.arg0, exc=exc))
# Execute the code object.
# Return to the original directory in case the test code exits in
# a non-existent directory.
cwd = os.getcwd()
try:
exec(code, main_mod.__dict__)
except SystemExit: # pylint: disable=try-except-raise
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except Exception:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel one layer off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
# PyPy3 weirdness. If I don't access __context__, then somehow it
# is non-None when the exception is reported at the upper layer,
# and a nested exception is shown to the user. This getattr fixes
# it somehow? https://bitbucket.org/pypy/pypy/issue/1903
getattr(err, "__context__", None)
# Call the excepthook.
try:
if hasattr(err, "__traceback__"):
err.__traceback__ = err.__traceback__.tb_next
sys.excepthook(typ, err, tb.tb_next)
except SystemExit: # pylint: disable=try-except-raise
raise
except Exception:
# Getting the output right in the case of excepthook
# shenanigans is kind of involved.
sys.stderr.write("Error in sys.excepthook:\n")
typ2, err2, tb2 = sys.exc_info()
err2.__suppress_context__ = True
if hasattr(err2, "__traceback__"):
err2.__traceback__ = err2.__traceback__.tb_next
sys.__excepthook__(typ2, err2, tb2.tb_next)
sys.stderr.write("\nOriginal exception was:\n")
raise ExceptionDuringRun(typ, err, tb.tb_next)
else:
sys.exit(1)
finally:
os.chdir(cwd)
|
def run(self):
"""Run the Python code!"""
# Create a module to serve as __main__
main_mod = types.ModuleType("__main__")
sys.modules["__main__"] = main_mod
main_mod.__file__ = self.arg0
if self.package:
main_mod.__package__ = self.package
if self.modulename:
main_mod.__loader__ = DummyLoader(self.modulename)
main_mod.__builtins__ = BUILTINS
# Set sys.argv properly.
sys.argv = self.args
try:
# Make a code object somehow.
if self.arg0.endswith((".pyc", ".pyo")):
code = make_code_from_pyc(self.arg0)
else:
code = make_code_from_py(self.arg0)
except CoverageException:
raise
except Exception as exc:
msg = (
"Couldn't run {filename!r} as Python code: {exc.__class__.__name__}: {exc}"
)
raise CoverageException(msg.format(filename=self.arg0, exc=exc))
# Execute the code object.
# Return to the original directory in case the test code exits in
# a non-existent directory.
cwd = os.getcwd()
try:
exec(code, main_mod.__dict__)
except SystemExit: # pylint: disable=try-except-raise
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except Exception:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel one layer off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
# PyPy3 weirdness. If I don't access __context__, then somehow it
# is non-None when the exception is reported at the upper layer,
# and a nested exception is shown to the user. This getattr fixes
# it somehow? https://bitbucket.org/pypy/pypy/issue/1903
getattr(err, "__context__", None)
# Call the excepthook.
try:
if hasattr(err, "__traceback__"):
err.__traceback__ = err.__traceback__.tb_next
sys.excepthook(typ, err, tb.tb_next)
except SystemExit: # pylint: disable=try-except-raise
raise
except Exception:
# Getting the output right in the case of excepthook
# shenanigans is kind of involved.
sys.stderr.write("Error in sys.excepthook:\n")
typ2, err2, tb2 = sys.exc_info()
err2.__suppress_context__ = True
if hasattr(err2, "__traceback__"):
err2.__traceback__ = err2.__traceback__.tb_next
sys.__excepthook__(typ2, err2, tb2.tb_next)
sys.stderr.write("\nOriginal exception was:\n")
raise ExceptionDuringRun(typ, err, tb.tb_next)
else:
sys.exit(1)
finally:
os.chdir(cwd)
|
https://github.com/nedbat/coveragepy/issues/838
|
$ python --version
Python 3.5.6 :: Anaconda, Inc.
$ coverage --version
Coverage.py, version 4.5.1 with C extension
Documentation at https://coverage.readthedocs.io
$ cat foo.py
print(__name__)
print(__spec__.name)
$ python -m foo # Behaves as expected
__main__
foo
$ coverage run -m foo # Observe __spec__ is None
__main__
Traceback (most recent call last):
File "/nas/dft/ire/rhys/Build/liquidmetal-debt/foo.py", line 2, in <module>
print(__spec__.name)
AttributeError: 'NoneType' object has no attribute 'name'
Coverage.py warning: No data was collected. (no-data-collected)
|
AttributeError
|
def find_module(modulename):
"""Find the module named `modulename`.
Returns the file path of the module, the name of the enclosing
package, and None (where a spec would have been).
"""
openfile = None
glo, loc = globals(), locals()
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if "." in modulename:
packagename, name = modulename.rsplit(".", 1)
package = __import__(packagename, glo, loc, ["__path__"])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource("module does not live in a file: %r" % modulename)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = "__main__"
package = __import__(packagename, glo, loc, ["__path__"])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError as err:
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
return pathname, packagename, None
|
def find_module(modulename):
"""Find the module named `modulename`.
Returns the file path of the module, and the name of the enclosing
package.
"""
openfile = None
glo, loc = globals(), locals()
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if "." in modulename:
packagename, name = modulename.rsplit(".", 1)
package = __import__(packagename, glo, loc, ["__path__"])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource("module does not live in a file: %r" % modulename)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = "__main__"
package = __import__(packagename, glo, loc, ["__path__"])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError as err:
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
return pathname, packagename
|
https://github.com/nedbat/coveragepy/issues/838
|
$ python --version
Python 3.5.6 :: Anaconda, Inc.
$ coverage --version
Coverage.py, version 4.5.1 with C extension
Documentation at https://coverage.readthedocs.io
$ cat foo.py
print(__name__)
print(__spec__.name)
$ python -m foo # Behaves as expected
__main__
foo
$ coverage run -m foo # Observe __spec__ is None
__main__
Traceback (most recent call last):
File "/nas/dft/ire/rhys/Build/liquidmetal-debt/foo.py", line 2, in <module>
print(__spec__.name)
AttributeError: 'NoneType' object has no attribute 'name'
Coverage.py warning: No data was collected. (no-data-collected)
|
AttributeError
|
def qualname_from_frame(frame):
"""Get a qualified name for the code running in `frame`."""
co = frame.f_code
fname = co.co_name
method = None
if co.co_argcount and co.co_varnames[0] == "self":
self = frame.f_locals["self"]
method = getattr(self, fname, None)
if method is None:
func = frame.f_globals.get(fname)
if func is None:
return None
return func.__module__ + "." + fname
func = getattr(method, "__func__", None)
if func is None:
cls = self.__class__
return cls.__module__ + "." + cls.__name__ + "." + fname
if hasattr(func, "__qualname__"):
qname = func.__module__ + "." + func.__qualname__
else:
for cls in getattr(self.__class__, "__mro__", ()):
f = cls.__dict__.get(fname, None)
if f is None:
continue
if f is func:
qname = cls.__module__ + "." + cls.__name__ + "." + fname
break
else:
# Support for old-style classes.
def mro(bases):
for base in bases:
f = base.__dict__.get(fname, None)
if f is func:
return base.__module__ + "." + base.__name__ + "." + fname
for base in bases:
qname = mro(base.__bases__)
if qname is not None:
return qname
return None
qname = mro([self.__class__])
if qname is None:
qname = func.__module__ + "." + fname
return qname
|
def qualname_from_frame(frame):
"""Get a qualified name for the code running in `frame`."""
co = frame.f_code
fname = co.co_name
method = None
if co.co_argcount and co.co_varnames[0] == "self":
self = frame.f_locals["self"]
method = getattr(self, fname, None)
if method is None:
func = frame.f_globals[fname]
return func.__module__ + "." + fname
func = getattr(method, "__func__", None)
if func is None:
cls = self.__class__
return cls.__module__ + "." + cls.__name__ + "." + fname
if hasattr(func, "__qualname__"):
qname = func.__module__ + "." + func.__qualname__
else:
for cls in getattr(self.__class__, "__mro__", ()):
f = cls.__dict__.get(fname, None)
if f is None:
continue
if f is func:
qname = cls.__module__ + "." + cls.__name__ + "." + fname
break
else:
# Support for old-style classes.
def mro(bases):
for base in bases:
f = base.__dict__.get(fname, None)
if f is func:
return base.__module__ + "." + base.__name__ + "." + fname
for base in bases:
qname = mro(base.__bases__)
if qname is not None:
return qname
return None
qname = mro([self.__class__])
if qname is None:
qname = func.__module__ + "." + fname
return qname
|
https://github.com/nedbat/coveragepy/issues/829
|
Traceback (most recent call last):
File "/eggpath/p/pytest-3.1.0-py2.7.egg/_pytest/config.py", line 367, in _importconftest
mod = conftestpath.pyimport()
File "/eggpath/p/py-1.4.31-py2.7.egg/py/_path/local.py", line 650, in pyimport
__import__(modname)
File "/eggpath/p/pytest-3.1.0-py2.7.egg/_pytest/assertion/rewrite.py", line 216, in load_module
py.builtin.exec_(co, mod.__dict__)
File "/eggpath/p/py-1.4.31-py2.7.egg/py/_builtin.py", line 221, in exec_
exec2(obj, globals, locals)
File "<string>", line 7, in exec2
File "/workspace_path//test-path/tests/conftest.py", line 6, in <module>
import xxxx
...
...
File "/eggpath/t/tables-3.2.2-py2.7-linux-x86_64.egg/tables/__init__.py", line 123, in <module>
from tables.file import File, open_file, copy_file, openFile, copyFile
File "/eggpath/t/tables-3.2.2-py2.7-linux-x86_64.egg/tables/file.py", line 31, in <module>
import numexpr
File "/eggpath/n/numexpr-2.6.4-py2.7-linux-x86_64.egg/numexpr/__init__.py", line 42, in <module>
from numexpr.tests import test, print_versions
File "/eggpath/n/numexpr-2.6.4-py2.7-linux-x86_64.egg/numexpr/tests/__init__.py", line 11, in <module>
from numexpr.tests.test_numexpr import test, print_versions
File "/eggpath/p/pytest-3.1.0-py2.7.egg/_pytest/assertion/rewrite.py", line 216, in load_module
py.builtin.exec_(co, mod.__dict__)
File "/eggpath/p/py-1.4.31-py2.7.egg/py/_builtin.py", line 221, in exec_
exec2(obj, globals, locals)
File "<string>", line 7, in exec2
File "/eggpath/n/numexpr-2.6.4-py2.7-linux-x86_64.egg/numexpr/tests/test_numexpr.py", line 44, in <module>
class test_numexpr(TestCase):
File "/workspace_path//coveragepy/coverage/context.py", line 40, in should_start_context_test_function
return qualname_from_frame(frame)
File "/workspace_path//coveragepy/coverage/context.py", line 49, in qualname_from_frame
func = frame.f_globals[fname]
KeyError: 'test_numexpr'
ERROR: could not load /workspace_path//test-path/tests/conftest.py
|
KeyError
|
def pip(args):
# First things first, get a recent (stable) version of pip.
if not os.path.exists(TOX_PIP_DIR):
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"--disable-pip-version-check",
"install",
"-t",
TOX_PIP_DIR,
"pip",
]
)
shutil.rmtree(glob(os.path.join(TOX_PIP_DIR, "pip-*.dist-info"))[0])
# And use that version.
pypath = os.environ.get("PYTHONPATH")
pypath = pypath.split(os.pathsep) if pypath is not None else []
pypath.insert(0, TOX_PIP_DIR)
os.environ["PYTHONPATH"] = os.pathsep.join(pypath)
# Disable PEP 517 support when using editable installs.
for n, a in enumerate(args):
if not a.startswith("-"):
if a in "install" and "-e" in args[n:]:
args.insert(n + 1, "--no-use-pep517")
break
# Fix call for setuptools editable install.
for n, a in enumerate(args):
if a == ".":
args[n] = os.getcwd()
subprocess.check_call([sys.executable, "-m", "pip"] + args, cwd=TOX_PIP_DIR)
|
def pip(args):
# First things first, get a recent (stable) version of pip.
if not os.path.exists(TOX_PIP_DIR):
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"--disable-pip-version-check",
"install",
"-t",
TOX_PIP_DIR,
"pip",
]
)
shutil.rmtree(glob(os.path.join(TOX_PIP_DIR, "pip-*.dist-info"))[0])
# And use that version.
for n, a in enumerate(args):
if not a.startswith("-"):
if a in "install" and "-e" in args[n:]:
args.insert(n + 1, "--no-use-pep517")
break
subprocess.check_call([sys.executable, os.path.join(TOX_PIP_DIR, "pip")] + args)
|
https://github.com/pypa/setuptools/issues/1644
|
setuptools fix_889_and_non-ascii_in_setup.cfg_take_2 $ tox -e py27
py27 create: /Users/jaraco/code/main/setuptools/.tox/py27
py27 installdeps: -rtests/requirements.txt
py27 develop-inst: /Users/jaraco/code/main/setuptools
ERROR: invocation failed (exit code 2), logfile: /Users/jaraco/code/main/setuptools/.tox/py27/log/py27-2.log
ERROR: actionid: py27
msg: developpkg
cmdargs: '/Users/jaraco/code/main/setuptools/.tox/py27/bin/pip install --exists-action w -e /Users/jaraco/code/main/setuptools'
DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7.
Obtaining file:///Users/jaraco/code/main/setuptools
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Getting requirements to build wheel: started
Getting requirements to build wheel: finished with status 'done'
Exception:
Traceback (most recent call last):
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/cli/base_command.py", line 176, in main
status = self.run(options, args)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/commands/install.py", line 315, in run
resolver.resolve(requirement_set)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/resolve.py", line 131, in resolve
self._resolve_one(requirement_set, req)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/resolve.py", line 294, in _resolve_one
abstract_dist = self._get_abstract_dist_for(req_to_install)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/resolve.py", line 226, in _get_abstract_dist_for
req, self.require_hashes, self.use_user_site, self.finder,
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/operations/prepare.py", line 382, in prepare_editable_requirement
abstract_dist.prep_for_dist(finder, self.build_isolation)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/operations/prepare.py", line 149, in prep_for_dist
reqs = self.req.pep517_backend.get_requires_for_build_wheel()
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py", line 71, in get_requires_for_build_wheel
'config_settings': config_settings
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py", line 162, in _call_hook
raise BackendUnavailable
BackendUnavailable
py27 installed: DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7.,apipkg==1.5,atomicwrites==1.2.1,attrs==18.2.0,configparser==3.7.1,contextlib2==0.5.5,coverage==4.5.2,enum34==1.1.6,execnet==1.5.0,flake8==3.6.0,funcsigs==1.0.2,futures==3.2.0,importlib-metadata==0.8,mccabe==0.6.1,mock==2.0.0,more-itertools==5.0.0,path.py==11.5.0,pathlib2==2.3.3,pbr==5.1.1,pluggy==0.8.1,py==1.7.0,pycodestyle==2.4.0,pyflakes==2.0.0,pytest==3.10.1,pytest-cov==2.6.1,pytest-fixture-config==1.4.0,pytest-flake8==1.0.3,pytest-shutil==1.4.0,pytest-virtualenv==1.4.0,scandir==1.9.0,six==1.12.0,termcolor==1.1.0,virtualenv==16.3.0,zipp==0.3.3
________________________________________________________________________________________ summary ________________________________________________________________________________________
ERROR: py27: InvocationError for command /Users/jaraco/code/main/setuptools/.tox/py27/bin/pip install --exists-action w -e /Users/jaraco/code/main/setuptools (see /Users/jaraco/code/main/setuptools/.tox/py27/log/py27-2.log) (exited with code 2)
|
Exception
|
def pip(args):
# First things first, get a recent (stable) version of pip.
if not os.path.exists(TOX_PIP_DIR):
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"--disable-pip-version-check",
"install",
"-t",
TOX_PIP_DIR,
"pip",
]
)
shutil.rmtree(glob(os.path.join(TOX_PIP_DIR, "pip-*.dist-info"))[0])
# And use that version.
pypath = os.environ.get("PYTHONPATH")
pypath = pypath.split(os.pathsep) if pypath is not None else []
pypath.insert(0, TOX_PIP_DIR)
os.environ["PYTHONPATH"] = os.pathsep.join(pypath)
# Fix call for setuptools editable install.
for n, a in enumerate(args):
if a == ".":
args[n] = os.getcwd()
subprocess.check_call([sys.executable, "-m", "pip"] + args, cwd=TOX_PIP_DIR)
|
def pip(args):
# First things first, get a recent (stable) version of pip.
if not os.path.exists(TOX_PIP_DIR):
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"--disable-pip-version-check",
"install",
"-t",
TOX_PIP_DIR,
"pip",
]
)
shutil.rmtree(glob(os.path.join(TOX_PIP_DIR, "pip-*.dist-info"))[0])
# And use that version.
pypath = os.environ.get("PYTHONPATH")
pypath = pypath.split(os.pathsep) if pypath is not None else []
pypath.insert(0, TOX_PIP_DIR)
os.environ["PYTHONPATH"] = os.pathsep.join(pypath)
# Disable PEP 517 support when using editable installs.
for n, a in enumerate(args):
if not a.startswith("-"):
if a in "install" and "-e" in args[n:]:
args.insert(n + 1, "--no-use-pep517")
break
# Fix call for setuptools editable install.
for n, a in enumerate(args):
if a == ".":
args[n] = os.getcwd()
subprocess.check_call([sys.executable, "-m", "pip"] + args, cwd=TOX_PIP_DIR)
|
https://github.com/pypa/setuptools/issues/1644
|
setuptools fix_889_and_non-ascii_in_setup.cfg_take_2 $ tox -e py27
py27 create: /Users/jaraco/code/main/setuptools/.tox/py27
py27 installdeps: -rtests/requirements.txt
py27 develop-inst: /Users/jaraco/code/main/setuptools
ERROR: invocation failed (exit code 2), logfile: /Users/jaraco/code/main/setuptools/.tox/py27/log/py27-2.log
ERROR: actionid: py27
msg: developpkg
cmdargs: '/Users/jaraco/code/main/setuptools/.tox/py27/bin/pip install --exists-action w -e /Users/jaraco/code/main/setuptools'
DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7.
Obtaining file:///Users/jaraco/code/main/setuptools
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Getting requirements to build wheel: started
Getting requirements to build wheel: finished with status 'done'
Exception:
Traceback (most recent call last):
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/cli/base_command.py", line 176, in main
status = self.run(options, args)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/commands/install.py", line 315, in run
resolver.resolve(requirement_set)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/resolve.py", line 131, in resolve
self._resolve_one(requirement_set, req)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/resolve.py", line 294, in _resolve_one
abstract_dist = self._get_abstract_dist_for(req_to_install)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/resolve.py", line 226, in _get_abstract_dist_for
req, self.require_hashes, self.use_user_site, self.finder,
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/operations/prepare.py", line 382, in prepare_editable_requirement
abstract_dist.prep_for_dist(finder, self.build_isolation)
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_internal/operations/prepare.py", line 149, in prep_for_dist
reqs = self.req.pep517_backend.get_requires_for_build_wheel()
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py", line 71, in get_requires_for_build_wheel
'config_settings': config_settings
File "/Users/jaraco/code/main/setuptools/.tox/py27/lib/python2.7/site-packages/pip/_vendor/pep517/wrappers.py", line 162, in _call_hook
raise BackendUnavailable
BackendUnavailable
py27 installed: DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7.,apipkg==1.5,atomicwrites==1.2.1,attrs==18.2.0,configparser==3.7.1,contextlib2==0.5.5,coverage==4.5.2,enum34==1.1.6,execnet==1.5.0,flake8==3.6.0,funcsigs==1.0.2,futures==3.2.0,importlib-metadata==0.8,mccabe==0.6.1,mock==2.0.0,more-itertools==5.0.0,path.py==11.5.0,pathlib2==2.3.3,pbr==5.1.1,pluggy==0.8.1,py==1.7.0,pycodestyle==2.4.0,pyflakes==2.0.0,pytest==3.10.1,pytest-cov==2.6.1,pytest-fixture-config==1.4.0,pytest-flake8==1.0.3,pytest-shutil==1.4.0,pytest-virtualenv==1.4.0,scandir==1.9.0,six==1.12.0,termcolor==1.1.0,virtualenv==16.3.0,zipp==0.3.3
________________________________________________________________________________________ summary ________________________________________________________________________________________
ERROR: py27: InvocationError for command /Users/jaraco/code/main/setuptools/.tox/py27/bin/pip install --exists-action w -e /Users/jaraco/code/main/setuptools (see /Users/jaraco/code/main/setuptools/.tox/py27/log/py27-2.log) (exited with code 2)
|
Exception
|
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command("build_sphinx")
self.target_dir = dict(build_sphinx.builder_target_dirs)["html"]
else:
build = self.get_finalized_command("build")
self.target_dir = os.path.join(build.build_base, "docs")
else:
self.ensure_dirname("upload_dir")
self.target_dir = self.upload_dir
if "pypi.python.org" in self.repository:
log.warn("Upload_docs command is deprecated for PyPi. Use RTD instead.")
self.announce("Using upload directory %s" % self.target_dir)
|
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command("build_sphinx")
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command("build")
self.target_dir = os.path.join(build.build_base, "docs")
else:
self.ensure_dirname("upload_dir")
self.target_dir = self.upload_dir
if "pypi.python.org" in self.repository:
log.warn("Upload_docs command is deprecated. Use RTD instead.")
self.announce("Using upload directory %s" % self.target_dir)
|
https://github.com/pypa/setuptools/issues/1060
|
Traceback (most recent call last):
File "setup.py", line 67, in <module>
setup(**setup_args)
File "/usr/lib/python3.5/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.5/distutils/dist.py", line 955, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.5/distutils/dist.py", line 973, in run_command
cmd_obj.ensure_finalized()
File "/usr/lib/python3.5/distutils/cmd.py", line 107, in ensure_finalized
self.finalize_options()
File "/home/dmitry/.local/lib/python3.5/site-packages/setuptools/command/upload_docs.py", line 65, in finalize_options
self.target_dir = build_sphinx.builder_target_dir
File "/usr/lib/python3.5/distutils/cmd.py", line 103, in __getattr__
raise AttributeError(attr)
AttributeError: builder_target_dir
|
AttributeError
|
def spec_for_distutils(self):
import importlib.abc
import importlib.util
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
return importlib.import_module("setuptools._distutils")
def exec_module(self, module):
pass
return importlib.util.spec_from_loader("distutils", DistutilsLoader())
|
def spec_for_distutils(self):
import importlib.abc
import importlib.util
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
return importlib.import_module("._distutils", "setuptools")
def exec_module(self, module):
pass
return importlib.util.spec_from_loader("distutils", DistutilsLoader())
|
https://github.com/pypa/setuptools/issues/2352
|
$ python --version
Python 3.5.1
$ python -c "import distutils"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 666, in _load_unlocked
File "<frozen importlib._bootstrap>", line 577, in module_from_spec
File "/home/gchan/tmp/setuptools-python-3.5/lib/python3.5/site-packages/_distutils_hack/__init__.py", line 82, in create_module
return importlib.import_module('._distutils', 'setuptools')
File "/home/gchan/tmp/setuptools-python-3.5/lib64/python3.5/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 981, in _gcd_import
File "<frozen importlib._bootstrap>", line 931, in _sanity_check
SystemError: Parent module 'setuptools' not loaded, cannot perform relative import
|
SystemError
|
def create_module(self, spec):
return importlib.import_module("setuptools._distutils")
|
def create_module(self, spec):
return importlib.import_module("._distutils", "setuptools")
|
https://github.com/pypa/setuptools/issues/2352
|
$ python --version
Python 3.5.1
$ python -c "import distutils"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 666, in _load_unlocked
File "<frozen importlib._bootstrap>", line 577, in module_from_spec
File "/home/gchan/tmp/setuptools-python-3.5/lib/python3.5/site-packages/_distutils_hack/__init__.py", line 82, in create_module
return importlib.import_module('._distutils', 'setuptools')
File "/home/gchan/tmp/setuptools-python-3.5/lib64/python3.5/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 981, in _gcd_import
File "<frozen importlib._bootstrap>", line 931, in _sanity_check
SystemError: Parent module 'setuptools' not loaded, cannot perform relative import
|
SystemError
|
def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
try:
# verify that value is a list or tuple to exclude unordered
# or single-use iterables
assert isinstance(value, (list, tuple))
# verify that elements of value are strings
assert "".join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
|
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert "".join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
|
https://github.com/pypa/setuptools/issues/1459
|
%CMD_IN_ENV% pip install .
Processing c:\projects\manubot
Complete output from command python setup.py egg_info:
running egg_info
creating pip-egg-info\manubot.egg-info
writing pip-egg-info\manubot.egg-info\PKG-INFO
writing dependency_links to pip-egg-info\manubot.egg-info\dependency_links.txt
writing entry points to pip-egg-info\manubot.egg-info\entry_points.txt
writing requirements to pip-egg-info\manubot.egg-info\requires.txt
writing top-level names to pip-egg-info\manubot.egg-info\top_level.txt
writing manifest file 'pip-egg-info\manubot.egg-info\SOURCES.txt'
c:\python36\lib\distutils\dist.py:261: UserWarning: Unknown distribution option: 'long_description_content_type'
warnings.warn(msg)
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-akczu3gv\setup.py", line 75, in <module>
'manubot': 'cite/*.lua',
File "c:\python36\lib\site-packages\setuptools\__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "c:\python36\lib\distutils\core.py", line 148, in setup
dist.run_commands()
File "c:\python36\lib\distutils\dist.py", line 955, in run_commands
self.run_command(cmd)
File "c:\python36\lib\distutils\dist.py", line 974, in run_command
cmd_obj.run()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 278, in run
self.find_sources()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 293, in find_sources
mm.run()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 524, in run
self.add_defaults()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 560, in add_defaults
sdist.add_defaults(self)
File "c:\python36\lib\site-packages\setuptools\command\py36compat.py", line 34, in add_defaults
self._add_defaults_python()
File "c:\python36\lib\site-packages\setuptools\command\sdist.py", line 134, in _add_defaults_python
for _, src_dir, _, filenames in build_py.data_files:
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 66, in __getattr__
self.data_files = self._get_data_files()
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 83, in _get_data_files
return list(map(self._get_pkg_data_files, self.packages or ()))
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 95, in _get_pkg_data_files
for file in self.find_data_files(package, src_dir)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 114, in find_data_files
return self.exclude_data_files(package, src_dir, files)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 198, in exclude_data_files
files = list(files)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 234, in <genexpr>
for pattern in raw_patterns
File "c:\python36\lib\distutils\util.py", line 125, in convert_path
raise ValueError("path '%s' cannot be absolute" % pathname)
ValueError: path '/' cannot be absolute
|
ValueError
|
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if not isinstance(value, dict):
raise DistutilsSetupError(
"{!r} must be a dictionary mapping package names to lists of "
"string wildcard patterns".format(attr)
)
for k, v in value.items():
if not isinstance(k, six.string_types):
raise DistutilsSetupError(
"keys of {!r} dict must be strings (got {!r})".format(attr, k)
)
assert_string_list(dist, "values of {!r} dict".format(attr), v)
|
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value, dict):
for k, v in value.items():
if not isinstance(k, str):
break
try:
iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr + " must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
|
https://github.com/pypa/setuptools/issues/1459
|
%CMD_IN_ENV% pip install .
Processing c:\projects\manubot
Complete output from command python setup.py egg_info:
running egg_info
creating pip-egg-info\manubot.egg-info
writing pip-egg-info\manubot.egg-info\PKG-INFO
writing dependency_links to pip-egg-info\manubot.egg-info\dependency_links.txt
writing entry points to pip-egg-info\manubot.egg-info\entry_points.txt
writing requirements to pip-egg-info\manubot.egg-info\requires.txt
writing top-level names to pip-egg-info\manubot.egg-info\top_level.txt
writing manifest file 'pip-egg-info\manubot.egg-info\SOURCES.txt'
c:\python36\lib\distutils\dist.py:261: UserWarning: Unknown distribution option: 'long_description_content_type'
warnings.warn(msg)
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-akczu3gv\setup.py", line 75, in <module>
'manubot': 'cite/*.lua',
File "c:\python36\lib\site-packages\setuptools\__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "c:\python36\lib\distutils\core.py", line 148, in setup
dist.run_commands()
File "c:\python36\lib\distutils\dist.py", line 955, in run_commands
self.run_command(cmd)
File "c:\python36\lib\distutils\dist.py", line 974, in run_command
cmd_obj.run()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 278, in run
self.find_sources()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 293, in find_sources
mm.run()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 524, in run
self.add_defaults()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 560, in add_defaults
sdist.add_defaults(self)
File "c:\python36\lib\site-packages\setuptools\command\py36compat.py", line 34, in add_defaults
self._add_defaults_python()
File "c:\python36\lib\site-packages\setuptools\command\sdist.py", line 134, in _add_defaults_python
for _, src_dir, _, filenames in build_py.data_files:
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 66, in __getattr__
self.data_files = self._get_data_files()
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 83, in _get_data_files
return list(map(self._get_pkg_data_files, self.packages or ()))
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 95, in _get_pkg_data_files
for file in self.find_data_files(package, src_dir)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 114, in find_data_files
return self.exclude_data_files(package, src_dir, files)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 198, in exclude_data_files
files = list(files)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 234, in <genexpr>
for pattern in raw_patterns
File "c:\python36\lib\distutils\util.py", line 125, in convert_path
raise ValueError("path '%s' cannot be absolute" % pathname)
ValueError: path '/' cannot be absolute
|
ValueError
|
def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
try:
# verify that value is a list or tuple to exclude unordered
# or single-use iterables
assert isinstance(value, (list, tuple))
# verify that elements of value are strings
assert "".join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
|
def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
try:
assert "".join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
|
https://github.com/pypa/setuptools/issues/1459
|
%CMD_IN_ENV% pip install .
Processing c:\projects\manubot
Complete output from command python setup.py egg_info:
running egg_info
creating pip-egg-info\manubot.egg-info
writing pip-egg-info\manubot.egg-info\PKG-INFO
writing dependency_links to pip-egg-info\manubot.egg-info\dependency_links.txt
writing entry points to pip-egg-info\manubot.egg-info\entry_points.txt
writing requirements to pip-egg-info\manubot.egg-info\requires.txt
writing top-level names to pip-egg-info\manubot.egg-info\top_level.txt
writing manifest file 'pip-egg-info\manubot.egg-info\SOURCES.txt'
c:\python36\lib\distutils\dist.py:261: UserWarning: Unknown distribution option: 'long_description_content_type'
warnings.warn(msg)
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-akczu3gv\setup.py", line 75, in <module>
'manubot': 'cite/*.lua',
File "c:\python36\lib\site-packages\setuptools\__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "c:\python36\lib\distutils\core.py", line 148, in setup
dist.run_commands()
File "c:\python36\lib\distutils\dist.py", line 955, in run_commands
self.run_command(cmd)
File "c:\python36\lib\distutils\dist.py", line 974, in run_command
cmd_obj.run()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 278, in run
self.find_sources()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 293, in find_sources
mm.run()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 524, in run
self.add_defaults()
File "c:\python36\lib\site-packages\setuptools\command\egg_info.py", line 560, in add_defaults
sdist.add_defaults(self)
File "c:\python36\lib\site-packages\setuptools\command\py36compat.py", line 34, in add_defaults
self._add_defaults_python()
File "c:\python36\lib\site-packages\setuptools\command\sdist.py", line 134, in _add_defaults_python
for _, src_dir, _, filenames in build_py.data_files:
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 66, in __getattr__
self.data_files = self._get_data_files()
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 83, in _get_data_files
return list(map(self._get_pkg_data_files, self.packages or ()))
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 95, in _get_pkg_data_files
for file in self.find_data_files(package, src_dir)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 114, in find_data_files
return self.exclude_data_files(package, src_dir, files)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 198, in exclude_data_files
files = list(files)
File "c:\python36\lib\site-packages\setuptools\command\build_py.py", line 234, in <genexpr>
for pattern in raw_patterns
File "c:\python36\lib\distutils\util.py", line 125, in convert_path
raise ValueError("path '%s' cannot be absolute" % pathname)
ValueError: path '/' cannot be absolute
|
ValueError
|
def _parse_config_files(self, filenames=None):
"""
Adapted from distutils.dist.Distribution.parse_config_files,
this method provides the same functionality in subtly-improved
ways.
"""
from setuptools.extern.six.moves.configparser import ConfigParser
# Ignore install directory options if we have a venv
if six.PY3 and sys.prefix != sys.base_prefix:
ignore_options = [
"install-base",
"install-platbase",
"install-lib",
"install-platlib",
"install-purelib",
"install-headers",
"install-scripts",
"install-data",
"prefix",
"exec-prefix",
"home",
"user",
"root",
]
else:
ignore_options = []
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
with io.open(filename, encoding="utf-8") as reader:
if DEBUG:
self.announce(" reading {filename}".format(**locals()))
(parser.read_file if six.PY3 else parser.readfp)(reader)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != "__name__" and opt not in ignore_options:
val = self._try_str(parser.get(section, opt))
opt = opt.replace("-", "_")
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if "global" in self.command_options:
for opt, (src, val) in self.command_options["global"].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ("verbose", "dry_run"): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
|
def _parse_config_files(self, filenames=None):
"""
Adapted from distutils.dist.Distribution.parse_config_files,
this method provides the same functionality in subtly-improved
ways.
"""
from setuptools.extern.six.moves.configparser import ConfigParser
# Ignore install directory options if we have a venv
if six.PY3 and sys.prefix != sys.base_prefix:
ignore_options = [
"install-base",
"install-platbase",
"install-lib",
"install-platlib",
"install-purelib",
"install-headers",
"install-scripts",
"install-data",
"prefix",
"exec-prefix",
"home",
"user",
"root",
]
else:
ignore_options = []
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
with io.open(filename, "rb") as fp:
encoding = detect_encoding(fp)
if DEBUG:
self.announce(" reading %s [%s]" % (filename, encoding or "locale"))
reader = io.TextIOWrapper(fp, encoding=encoding)
(parser.read_file if six.PY3 else parser.readfp)(reader)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != "__name__" and opt not in ignore_options:
val = self._try_str(parser.get(section, opt))
opt = opt.replace("-", "_")
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if "global" in self.command_options:
for opt, (src, val) in self.command_options["global"].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ("verbose", "dry_run"): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
|
https://github.com/pypa/setuptools/issues/1702
|
configparser # easy_install --version
setuptools 40.8.0 from c:\python37\lib\site-packages (Python 3.7)
configparser 3.7.2 # python setup.py egg_info
Traceback (most recent call last):
File "setup.py", line 5, in <module>
package_dir={'': 'src'},
File "C:\Python37\lib\site-packages\setuptools\__init__.py", line 144, in setup
_install_setup_requires(attrs)
File "C:\Python37\lib\site-packages\setuptools\__init__.py", line 137, in _install_setup_requires
dist.parse_config_files(ignore_option_errors=True)
File "C:\Python37\lib\site-packages\setuptools\dist.py", line 702, in parse_config_files
self._parse_config_files(filenames=filenames)
File "C:\Python37\lib\site-packages\setuptools\dist.py", line 599, in _parse_config_files
(parser.read_file if six.PY3 else parser.readfp)(reader)
File "C:\Python37\lib\configparser.py", line 717, in read_file
self._read(f, source)
File "C:\Python37\lib\configparser.py", line 1014, in _read
for lineno, line in enumerate(fp, start=1):
File "C:\Python37\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x81 in position 103: character maps to <undefined>
|
UnicodeDecodeError
|
def create_certificate(self, csr, issuer_options):
"""
Creates a CFSSL certificate.
:param csr:
:param issuer_options:
:return:
"""
current_app.logger.info(
"Requesting a new cfssl certificate with csr: {0}".format(csr)
)
url = "{0}{1}".format(current_app.config.get("CFSSL_URL"), "/api/v1/cfssl/sign")
data = {"certificate_request": csr}
data = json.dumps(data)
try:
hex_key = current_app.config.get("CFSSL_KEY")
key = bytes.fromhex(hex_key)
except (ValueError, NameError, TypeError):
# unable to find CFSSL_KEY in config, continue using normal sign method
pass
else:
data = data.encode()
token = base64.b64encode(hmac.new(key, data, digestmod=hashlib.sha256).digest())
data = base64.b64encode(data)
data = json.dumps(
{"token": token.decode("utf-8"), "request": data.decode("utf-8")}
)
url = "{0}{1}".format(
current_app.config.get("CFSSL_URL"), "/api/v1/cfssl/authsign"
)
response = self.session.post(
url, data=data.encode(encoding="utf_8", errors="strict")
)
if response.status_code > 399:
metrics.send("cfssl_create_certificate_failure", "counter", 1)
raise Exception("Error creating cert. Please check your CFSSL API server")
response_json = json.loads(response.content.decode("utf_8"))
cert = response_json["result"]["certificate"]
parsed_cert = parse_certificate(cert)
metrics.send("cfssl_create_certificate_success", "counter", 1)
return (
cert,
current_app.config.get("CFSSL_INTERMEDIATE"),
parsed_cert.serial_number,
)
|
def create_certificate(self, csr, issuer_options):
"""
Creates a CFSSL certificate.
:param csr:
:param issuer_options:
:return:
"""
current_app.logger.info(
"Requesting a new cfssl certificate with csr: {0}".format(csr)
)
url = "{0}{1}".format(current_app.config.get("CFSSL_URL"), "/api/v1/cfssl/sign")
data = {"certificate_request": csr}
data = json.dumps(data)
try:
hex_key = current_app.config.get("CFSSL_KEY")
key = bytes.fromhex(hex_key)
except (ValueError, NameError):
# unable to find CFSSL_KEY in config, continue using normal sign method
pass
else:
data = data.encode()
token = base64.b64encode(hmac.new(key, data, digestmod=hashlib.sha256).digest())
data = base64.b64encode(data)
data = json.dumps(
{"token": token.decode("utf-8"), "request": data.decode("utf-8")}
)
url = "{0}{1}".format(
current_app.config.get("CFSSL_URL"), "/api/v1/cfssl/authsign"
)
response = self.session.post(
url, data=data.encode(encoding="utf_8", errors="strict")
)
if response.status_code > 399:
metrics.send("cfssl_create_certificate_failure", "counter", 1)
raise Exception("Error creating cert. Please check your CFSSL API server")
response_json = json.loads(response.content.decode("utf_8"))
cert = response_json["result"]["certificate"]
parsed_cert = parse_certificate(cert)
metrics.send("cfssl_create_certificate_success", "counter", 1)
return (
cert,
current_app.config.get("CFSSL_INTERMEDIATE"),
parsed_cert.serial_number,
)
|
https://github.com/Netflix/lemur/issues/2879
|
[2020-01-07 17:46:32,386] ERROR in service: Exception minting certificate
Traceback (most recent call last):
File "/www/lemur/lemur/certificates/service.py", line 273, in create
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
File "/www/lemur/lemur/certificates/service.py", line 223, in mint
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
File "/www/lemur/lemur/plugins/lemur_cfssl/plugin.py", line 58, in create_certificate
key = bytes.fromhex(hex_key)
TypeError: fromhex() argument must be str, not None
Exception minting certificate
Traceback (most recent call last):
File "/www/lemur/lemur/certificates/service.py", line 273, in create
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
File "/www/lemur/lemur/certificates/service.py", line 223, in mint
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
File "/www/lemur/lemur/plugins/lemur_cfssl/plugin.py", line 58, in create_certificate
key = bytes.fromhex(hex_key)
TypeError: fromhex() argument must be str, not None
Exception minting certificate
Traceback (most recent call last):
File "/www/lemur/lemur/certificates/service.py", line 273, in create
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
File "/www/lemur/lemur/certificates/service.py", line 223, in mint
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
File "/www/lemur/lemur/plugins/lemur_cfssl/plugin.py", line 58, in create_certificate
key = bytes.fromhex(hex_key)
TypeError: fromhex() argument must be str, not None
[2020-01-07 17:46:32,387] ERROR in schema: fromhex() argument must be str, not None
Traceback (most recent call last):
File "/www/lemur/lemur/common/schema.py", line 160, in decorated_function
resp = f(*args, **kwargs)
File "/www/lemur/lemur/certificates/views.py", line 482, in post
cert = service.create(**data)
File "/www/lemur/lemur/certificates/service.py", line 273, in create
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
File "/www/lemur/lemur/certificates/service.py", line 223, in mint
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
File "/www/lemur/lemur/plugins/lemur_cfssl/plugin.py", line 58, in create_certificate
key = bytes.fromhex(hex_key)
TypeError: fromhex() argument must be str, not None
fromhex() argument must be str, not None
Traceback (most recent call last):
File "/www/lemur/lemur/common/schema.py", line 160, in decorated_function
resp = f(*args, **kwargs)
File "/www/lemur/lemur/certificates/views.py", line 482, in post
cert = service.create(**data)
File "/www/lemur/lemur/certificates/service.py", line 273, in create
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
File "/www/lemur/lemur/certificates/service.py", line 223, in mint
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
File "/www/lemur/lemur/plugins/lemur_cfssl/plugin.py", line 58, in create_certificate
key = bytes.fromhex(hex_key)
TypeError: fromhex() argument must be str, not None
fromhex() argument must be str, not None
Traceback (most recent call last):
File "/www/lemur/lemur/common/schema.py", line 160, in decorated_function
resp = f(*args, **kwargs)
File "/www/lemur/lemur/certificates/views.py", line 482, in post
cert = service.create(**data)
File "/www/lemur/lemur/certificates/service.py", line 273, in create
cert_body, private_key, cert_chain, external_id, csr = mint(**kwargs)
File "/www/lemur/lemur/certificates/service.py", line 223, in mint
cert_body, cert_chain, external_id = issuer.create_certificate(csr, kwargs)
File "/www/lemur/lemur/plugins/lemur_cfssl/plugin.py", line 58, in create_certificate
key = bytes.fromhex(hex_key)
TypeError: fromhex() argument must be str, not None
|
TypeError
|
def validate_options(options):
"""
Ensures that the plugin options are valid.
:param options:
:return:
"""
interval = get_plugin_option("interval", options)
unit = get_plugin_option("unit", options)
if not interval and not unit:
return
if unit == "month":
interval *= 30
elif unit == "week":
interval *= 7
if interval > 90:
raise ValidationError(
"Notification cannot be more than 90 days into the future."
)
|
def validate_options(options):
"""
Ensures that the plugin options are valid.
:param options:
:return:
"""
interval = get_plugin_option("interval", options)
unit = get_plugin_option("unit", options)
if not interval and not unit:
return
if interval == "month":
unit *= 30
elif interval == "week":
unit *= 7
if unit > 90:
raise ValidationError(
"Notification cannot be more than 90 days into the future."
)
|
https://github.com/Netflix/lemur/issues/752
|
2017-04-12 17:35:33,074 ERROR: Exception on /api/1/notifications/950 [PUT] [in /apps/lemur/lib/python3.5/site-packages/flask/app.py:1560]
Traceback (most recent call last):
File "/apps/lemur/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/apps/lemur/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/apps/lemur/lib/python3.5/site-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/apps/lemur/lib/python3.5/site-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/apps/lemur/lib/python3.5/site-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/apps/lemur/lemur/auth/service.py", line 110, in decorated_function
return f(*args, **kwargs)
File "/apps/lemur/lemur/common/schema.py", line 150, in decorated_function
data, errors = input_schema.load(request_data)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/schema.py", line 580, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/schema.py", line 660, in _do_load
index_errors=self.opts.index_errors,
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/marshalling.py", line 295, in deserialize
index=(index if index_errors else None)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/marshalling.py", line 68, in call_and_store
value = getter_func(data)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/marshalling.py", line 288, in <lambda>
data
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/fields.py", line 265, in deserialize
output = self._deserialize(value, attr, data)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/fields.py", line 465, in _deserialize
data, errors = self.schema.load(value)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/schema.py", line 580, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/schema.py", line 660, in _do_load
index_errors=self.opts.index_errors,
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/marshalling.py", line 295, in deserialize
index=(index if index_errors else None)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/marshalling.py", line 68, in call_and_store
value = getter_func(data)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/marshalling.py", line 288, in <lambda>
data
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/fields.py", line 266, in deserialize
self._validate(output)
File "/apps/lemur/lib/python3.5/site-packages/marshmallow/fields.py", line 196, in _validate
r = validator(value)
File "/apps/lemur/lemur/schemas.py", line 47, in validate_options
if unit > 90:
TypeError: unorderable types: str() > int()
|
TypeError
|
def create(kwargs):
"""
Create a new authority.
:return:
"""
issuer = plugins.get(kwargs.get("pluginName"))
kwargs["creator"] = g.current_user.email
cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)
cert = Certificate(cert_body, chain=intermediate)
cert.owner = kwargs["ownerEmail"]
if kwargs["caType"] == "subca":
cert.description = (
"This is the ROOT certificate for the {0} sub certificate authority the parent \
authority is {1}.".format(
kwargs.get("caName"), kwargs.get("caParent")
)
)
else:
cert.description = (
"This is the ROOT certificate for the {0} certificate authority.".format(
kwargs.get("caName")
)
)
cert.user = g.current_user
cert.notifications = notification_service.create_default_expiration_notifications(
"DEFAULT_SECURITY", current_app.config.get("LEMUR_SECURITY_TEAM_EMAIL")
)
# we create and attach any roles that the issuer gives us
role_objs = []
for r in issuer_roles:
role = role_service.create(
r["name"],
password=r["password"],
description="{0} auto generated role".format(kwargs.get("pluginName")),
username=r["username"],
)
# the user creating the authority should be able to administer it
if role.username == "admin":
g.current_user.roles.append(role)
role_objs.append(role)
authority = Authority(
kwargs.get("caName"),
kwargs["ownerEmail"],
kwargs["pluginName"],
cert_body,
description=kwargs["caDescription"],
chain=intermediate,
roles=role_objs,
)
database.update(cert)
authority = database.create(authority)
# the owning dl or role should have this authority associated with it
owner_role = role_service.get_by_name(kwargs["ownerEmail"])
if not owner_role:
owner_role = role_service.create(kwargs["ownerEmail"])
owner_role.authority = authority
g.current_user.authorities.append(authority)
return authority
|
def create(kwargs):
"""
Create a new authority.
:return:
"""
issuer = plugins.get(kwargs.get("pluginName"))
kwargs["creator"] = g.current_user.email
cert_body, intermediate, issuer_roles = issuer.create_authority(kwargs)
cert = Certificate(cert_body, chain=intermediate)
cert.owner = kwargs["ownerEmail"]
if kwargs["caType"] == "subca":
cert.description = (
"This is the ROOT certificate for the {0} sub certificate authority the parent \
authority is {1}.".format(
kwargs.get("caName"), kwargs.get("caParent")
)
)
else:
cert.description = (
"This is the ROOT certificate for the {0} certificate authority.".format(
kwargs.get("caName")
)
)
cert.user = g.current_user
cert.notifications = notification_service.create_default_expiration_notifications(
"DEFAULT_SECURITY", current_app.config.get("LEMUR_SECURITY_TEAM_EMAIL")
)
# we create and attach any roles that the issuer gives us
role_objs = []
for r in issuer_roles:
role = role_service.create(
r["name"],
password=r["password"],
description="{0} auto generated role".format(kwargs.get("pluginName")),
username=r["username"],
)
# the user creating the authority should be able to administer it
if role.username == "admin":
g.current_user.roles.append(role)
role_objs.append(role)
authority = Authority(
kwargs.get("caName"),
kwargs["ownerEmail"],
kwargs["pluginName"],
cert_body,
description=kwargs["caDescription"],
chain=intermediate,
roles=role_objs,
)
database.update(cert)
authority = database.create(authority)
# the owning dl or role should have this authority associated with it
owner_role = role_service.get_by_name(kwargs["ownerEmail"])
owner_role.authority = authority
g.current_user.authorities.append(authority)
return authority
|
https://github.com/Netflix/lemur/issues/261
|
2016-03-31 16:21:39,507 ERROR: 'NoneType' object has no attribute 'authority' [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/authorities/views.py", line 201, in post
return service.create(args)
File "/apps/lemur/lemur/authorities/service.py", line 106, in create
owner_role.authority = authority
AttributeError: 'NoneType' object has no attribute 'authority'
|
AttributeError
|
def mint(issuer_options):
"""
Minting is slightly different for each authority.
Support for multiple authorities is handled by individual plugins.
:param issuer_options:
"""
authority = issuer_options["authority"]
issuer = plugins.get(authority.plugin_name)
# allow the CSR to be specified by the user
if not issuer_options.get("csr"):
csr, private_key = create_csr(issuer_options)
else:
csr = str(issuer_options.get("csr"))
private_key = None
issuer_options["creator"] = g.user.email
cert_body, cert_chain = issuer.create_certificate(csr, issuer_options)
cert = Certificate(cert_body, private_key, cert_chain)
cert.user = g.user
cert.authority = authority
database.update(cert)
return (
cert,
private_key,
cert_chain,
)
|
def mint(issuer_options):
"""
Minting is slightly different for each authority.
Support for multiple authorities is handled by individual plugins.
:param issuer_options:
"""
authority = issuer_options["authority"]
issuer = plugins.get(authority.plugin_name)
# allow the CSR to be specified by the user
if not issuer_options.get("csr"):
csr, private_key = create_csr(issuer_options)
else:
csr = issuer_options.get("csr")
private_key = None
issuer_options["creator"] = g.user.email
cert_body, cert_chain = issuer.create_certificate(csr, issuer_options)
cert = Certificate(cert_body, private_key, cert_chain)
cert.user = g.user
cert.authority = authority
database.update(cert)
return (
cert,
private_key,
cert_chain,
)
|
https://github.com/Netflix/lemur/issues/246
|
2016-02-19 21:19:04,405 ERROR: 45 [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/certificates/views.py", line 387, in post
return service.create(**args)
File "/apps/lemur/lemur/certificates/service.py", line 247, in create
cert, private_key, cert_chain = mint(kwargs)
File "/apps/lemur/lemur/certificates/service.py", line 151, in mint
cert_body, cert_chain = issuer.create_certificate(csr, issuer_options)
File "/apps/lemur/lemur/plugins/lemur_verisign/plugin.py", line 156, in create_certificate
response = self.session.post(url, data=data)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 511, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 454, in request
prep = self.prepare_request(req)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 388, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 296, in prepare
self.prepare_body(data, files, json)
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 450, in prepare_body
body = self._encode_params(data)
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 97, in _encode_params
return urlencode(result, doseq=True)
File "/usr/lib/python2.7/urllib.py", line 1338, in urlencode
v = quote_plus(v)
File "/usr/lib/python2.7/urllib.py", line 1293, in quote_plus
s = quote(s, safe + ' ')
File "/usr/lib/python2.7/urllib.py", line 1288, in quote
return ''.join(map(quoter, s))
KeyError: 45
|
KeyError
|
def post(self):
"""
.. http:post:: /certificates
Creates a new certificate
**Example request**:
.. sourcecode:: http
POST /certificates HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"country": "US",
"state": "CA",
"location": "A Place",
"organization": "ExampleInc.",
"organizationalUnit": "Operations",
"owner": "bob@example.com",
"description": "test",
"selectedAuthority": "timetest2",
"csr": "----BEGIN CERTIFICATE REQUEST-----...",
"authority": {
"body": "-----BEGIN...",
"name": "timetest2",
"chain": "",
"notBefore": "2015-06-05T15:20:59",
"active": true,
"id": 50,
"notAfter": "2015-06-17T15:21:08",
"description": "dsfdsf"
},
"notifications": [
{
"description": "Default 30 day expiration notification",
"notificationOptions": [
{
"name": "interval",
"required": true,
"value": 30,
"helpMessage": "Number of days to be alert before expiration.",
"validation": "^\\d+$",
"type": "int"
},
{
"available": [
"days",
"weeks",
"months"
],
"name": "unit",
"required": true,
"value": "days",
"helpMessage": "Interval unit",
"validation": "",
"type": "select"
},
{
"name": "recipients",
"required": true,
"value": "bob@example.com",
"helpMessage": "Comma delimited list of email addresses",
"validation": "^([\\w+-.%]+@[\\w-.]+\\.[A-Za-z]{2,4},?)+$",
"type": "str"
}
],
"label": "DEFAULT_KGLISSON_30_DAY",
"pluginName": "email-notification",
"active": true,
"id": 7
}
],
"extensions": {
"basicConstraints": {},
"keyUsage": {
"isCritical": true,
"useKeyEncipherment": true,
"useDigitalSignature": true
},
"extendedKeyUsage": {
"isCritical": true,
"useServerAuthentication": true
},
"subjectKeyIdentifier": {
"includeSKI": true
},
"subAltNames": {
"names": []
}
},
"commonName": "test",
"validityStart": "2015-06-05T07:00:00.000Z",
"validityEnd": "2015-06-16T07:00:00.000Z",
"replacements": [
{'id': 123}
]
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 1,
"name": "cert1",
"description": "this is cert1",
"bits": 2048,
"deleted": false,
"issuer": "ExampeInc.",
"serial": "123450",
"chain": "-----Begin ...",
"body": "-----Begin ...",
"san": true,
"owner": "jimbob@example.com",
"active": false,
"notBefore": "2015-06-05T17:09:39",
"notAfter": "2015-06-10T17:09:39",
"cn": "example.com",
"status": "unknown"
}
:arg extensions: extensions to be used in the certificate
:arg description: description for new certificate
:arg owner: owner email
:arg validityStart: when the certificate should start being valid
:arg validityEnd: when the certificate should expire
:arg authority: authority that should issue the certificate
:arg country: country for the CSR
:arg state: state for the CSR
:arg location: location for the CSR
:arg organization: organization for CSR
:arg commonName: certiifcate common name
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
self.reqparse.add_argument("extensions", type=dict, location="json")
self.reqparse.add_argument("destinations", type=list, default=[], location="json")
self.reqparse.add_argument("notifications", type=list, default=[], location="json")
self.reqparse.add_argument("replacements", type=list, default=[], location="json")
self.reqparse.add_argument(
"validityStart", type=str, location="json"
) # TODO validate
self.reqparse.add_argument(
"validityEnd", type=str, location="json"
) # TODO validate
self.reqparse.add_argument(
"authority", type=valid_authority, location="json", required=True
)
self.reqparse.add_argument("description", type=str, location="json")
self.reqparse.add_argument("country", type=str, location="json", required=True)
self.reqparse.add_argument("state", type=str, location="json", required=True)
self.reqparse.add_argument("location", type=str, location="json", required=True)
self.reqparse.add_argument("organization", type=str, location="json", required=True)
self.reqparse.add_argument(
"organizationalUnit", type=str, location="json", required=True
)
self.reqparse.add_argument("owner", type=str, location="json", required=True)
self.reqparse.add_argument("commonName", type=str, location="json", required=True)
self.reqparse.add_argument("csr", type=str, location="json")
args = self.reqparse.parse_args()
authority = args["authority"]
role = role_service.get_by_name(authority.owner)
# all the authority role members should be allowed
roles = [x.name for x in authority.roles]
# allow "owner" roles by team DL
roles.append(role)
authority_permission = AuthorityPermission(authority.id, roles)
if authority_permission.can():
# if we are not admins lets make sure we aren't issuing anything sensitive
if not SensitiveDomainPermission().can():
check_sensitive_domains(get_domains_from_options(args))
return service.create(**args)
return dict(
message="You are not authorized to use {0}".format(args["authority"].name)
), 403
|
def post(self):
"""
.. http:post:: /certificates
Creates a new certificate
**Example request**:
.. sourcecode:: http
POST /certificates HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"country": "US",
"state": "CA",
"location": "A Place",
"organization": "ExampleInc.",
"organizationalUnit": "Operations",
"owner": "bob@example.com",
"description": "test",
"selectedAuthority": "timetest2",
"csr",
"authority": {
"body": "-----BEGIN...",
"name": "timetest2",
"chain": "",
"notBefore": "2015-06-05T15:20:59",
"active": true,
"id": 50,
"notAfter": "2015-06-17T15:21:08",
"description": "dsfdsf"
},
"notifications": [
{
"description": "Default 30 day expiration notification",
"notificationOptions": [
{
"name": "interval",
"required": true,
"value": 30,
"helpMessage": "Number of days to be alert before expiration.",
"validation": "^\\d+$",
"type": "int"
},
{
"available": [
"days",
"weeks",
"months"
],
"name": "unit",
"required": true,
"value": "days",
"helpMessage": "Interval unit",
"validation": "",
"type": "select"
},
{
"name": "recipients",
"required": true,
"value": "bob@example.com",
"helpMessage": "Comma delimited list of email addresses",
"validation": "^([\\w+-.%]+@[\\w-.]+\\.[A-Za-z]{2,4},?)+$",
"type": "str"
}
],
"label": "DEFAULT_KGLISSON_30_DAY",
"pluginName": "email-notification",
"active": true,
"id": 7
}
],
"extensions": {
"basicConstraints": {},
"keyUsage": {
"isCritical": true,
"useKeyEncipherment": true,
"useDigitalSignature": true
},
"extendedKeyUsage": {
"isCritical": true,
"useServerAuthentication": true
},
"subjectKeyIdentifier": {
"includeSKI": true
},
"subAltNames": {
"names": []
}
},
"commonName": "test",
"validityStart": "2015-06-05T07:00:00.000Z",
"validityEnd": "2015-06-16T07:00:00.000Z",
"replacements": [
{'id': 123}
]
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 1,
"name": "cert1",
"description": "this is cert1",
"bits": 2048,
"deleted": false,
"issuer": "ExampeInc.",
"serial": "123450",
"chain": "-----Begin ...",
"body": "-----Begin ...",
"san": true,
"owner": "jimbob@example.com",
"active": false,
"notBefore": "2015-06-05T17:09:39",
"notAfter": "2015-06-10T17:09:39",
"cn": "example.com",
"status": "unknown"
}
:arg extensions: extensions to be used in the certificate
:arg description: description for new certificate
:arg owner: owner email
:arg validityStart: when the certificate should start being valid
:arg validityEnd: when the certificate should expire
:arg authority: authority that should issue the certificate
:arg country: country for the CSR
:arg state: state for the CSR
:arg location: location for the CSR
:arg organization: organization for CSR
:arg commonName: certiifcate common name
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
self.reqparse.add_argument("extensions", type=dict, location="json")
self.reqparse.add_argument("destinations", type=list, default=[], location="json")
self.reqparse.add_argument("notifications", type=list, default=[], location="json")
self.reqparse.add_argument("replacements", type=list, default=[], location="json")
self.reqparse.add_argument(
"validityStart", type=str, location="json"
) # TODO validate
self.reqparse.add_argument(
"validityEnd", type=str, location="json"
) # TODO validate
self.reqparse.add_argument(
"authority", type=valid_authority, location="json", required=True
)
self.reqparse.add_argument("description", type=str, location="json")
self.reqparse.add_argument("country", type=str, location="json", required=True)
self.reqparse.add_argument("state", type=str, location="json", required=True)
self.reqparse.add_argument("location", type=str, location="json", required=True)
self.reqparse.add_argument("organization", type=str, location="json", required=True)
self.reqparse.add_argument(
"organizationalUnit", type=str, location="json", required=True
)
self.reqparse.add_argument("owner", type=str, location="json", required=True)
self.reqparse.add_argument("commonName", type=str, location="json", required=True)
self.reqparse.add_argument("csr", type=str, location="json")
args = self.reqparse.parse_args()
authority = args["authority"]
role = role_service.get_by_name(authority.owner)
# all the authority role members should be allowed
roles = [x.name for x in authority.roles]
# allow "owner" roles by team DL
roles.append(role)
authority_permission = AuthorityPermission(authority.id, roles)
if authority_permission.can():
# if we are not admins lets make sure we aren't issuing anything sensitive
if not SensitiveDomainPermission().can():
check_sensitive_domains(get_domains_from_options(args))
return service.create(**args)
return dict(
message="You are not authorized to use {0}".format(args["authority"].name)
), 403
|
https://github.com/Netflix/lemur/issues/246
|
2016-02-19 21:19:04,405 ERROR: 45 [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/certificates/views.py", line 387, in post
return service.create(**args)
File "/apps/lemur/lemur/certificates/service.py", line 247, in create
cert, private_key, cert_chain = mint(kwargs)
File "/apps/lemur/lemur/certificates/service.py", line 151, in mint
cert_body, cert_chain = issuer.create_certificate(csr, issuer_options)
File "/apps/lemur/lemur/plugins/lemur_verisign/plugin.py", line 156, in create_certificate
response = self.session.post(url, data=data)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 511, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 454, in request
prep = self.prepare_request(req)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 388, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 296, in prepare
self.prepare_body(data, files, json)
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 450, in prepare_body
body = self._encode_params(data)
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 97, in _encode_params
return urlencode(result, doseq=True)
File "/usr/lib/python2.7/urllib.py", line 1338, in urlencode
v = quote_plus(v)
File "/usr/lib/python2.7/urllib.py", line 1293, in quote_plus
s = quote(s, safe + ' ')
File "/usr/lib/python2.7/urllib.py", line 1288, in quote
return ''.join(map(quoter, s))
KeyError: 45
|
KeyError
|
def put(self, certificate_id):
"""
.. http:put:: /certificates/1
Update a certificate
**Example request**:
.. sourcecode:: http
PUT /certificates/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"owner": "jimbob@example.com",
"active": false
"notifications": [],
"destinations": []
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 1,
"name": "cert1",
"description": "this is cert1",
"bits": 2048,
"deleted": false,
"issuer": "ExampeInc.",
"serial": "123450",
"chain": "-----Begin ...",
"body": "-----Begin ...",
"san": true,
"owner": "jimbob@example.com",
"active": false,
"notBefore": "2015-06-05T17:09:39",
"notAfter": "2015-06-10T17:09:39",
"cn": "example.com",
"status": "unknown",
}
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
self.reqparse.add_argument("active", type=bool, location="json")
self.reqparse.add_argument("owner", type=str, location="json")
self.reqparse.add_argument("description", type=str, location="json")
self.reqparse.add_argument("destinations", type=list, default=[], location="json")
self.reqparse.add_argument("notifications", type=list, default=[], location="json")
args = self.reqparse.parse_args()
cert = service.get(certificate_id)
role = role_service.get_by_name(cert.owner)
permission = UpdateCertificatePermission(
certificate_id, getattr(role, "name", None)
)
if permission.can():
return service.update(
certificate_id,
args["owner"],
args["description"],
args["active"],
args["destinations"],
args["notifications"],
)
return dict(message="You are not authorized to update this certificate"), 403
|
def put(self, certificate_id):
"""
.. http:put:: /certificates/1
Update a certificate
**Example request**:
.. sourcecode:: http
PUT /certificates/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"owner": "jimbob@example.com",
"active": false
"notifications": [],
"destinations": []
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"id": 1,
"name": "cert1",
"description": "this is cert1",
"bits": 2048,
"deleted": false,
"issuer": "ExampeInc.",
"serial": "123450",
"chain": "-----Begin ...",
"body": "-----Begin ...",
"san": true,
"owner": "jimbob@example.com",
"active": false,
"notBefore": "2015-06-05T17:09:39",
"notAfter": "2015-06-10T17:09:39",
"cn": "example.com",
"status": "unknown",
}
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
:statuscode 403: unauthenticated
"""
self.reqparse.add_argument("active", type=bool, location="json")
self.reqparse.add_argument("owner", type=str, location="json")
self.reqparse.add_argument("description", type=str, location="json")
self.reqparse.add_argument("destinations", type=list, default=[], location="json")
self.reqparse.add_argument("notifications", type=list, default=[], location="json")
args = self.reqparse.parse_args()
cert = service.get(certificate_id)
role = role_service.get_by_name(cert.owner)
permission = UpdateCertificatePermission(certificate_id, role.name)
if permission.can():
return service.update(
certificate_id,
args["owner"],
args["description"],
args["active"],
args["destinations"],
args["notifications"],
)
return dict(message="You are not authorized to update this certificate"), 403
|
https://github.com/Netflix/lemur/issues/55
|
2015-08-26 20:33:36,751 ERROR: 'NoneType' object has no attribute 'name' [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/certificates/views.py", line 575, in put
permission = UpdateCertificatePermission(certificate_id, role.name)
AttributeError: 'NoneType' object has no attribute 'name'
2015-08-26 20:34:08,236 ERROR: 'NoneType' object has no attribute 'name' [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/certificates/views.py", line 575, in put
permission = UpdateCertificatePermission(certificate_id, role.name)
AttributeError: 'NoneType' object has no attribute 'name'
2015-08-26 20:37:19,147 ERROR: 'NoneType' object has no attribute 'name' [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/certificates/views.py", line 575, in put
permission = UpdateCertificatePermission(certificate_id, role.name)
AttributeError: 'NoneType' object has no attribute 'name'
|
AttributeError
|
async def graphql_http_server(self, request: Request) -> Response:
try:
data = await self.extract_data_from_request(request)
except HttpError as error:
return PlainTextResponse(error.message or error.status, status_code=400)
context_value = await self.get_context_for_request(request)
extensions = await self.get_extensions_for_request(request, context_value)
middleware = await self.get_middleware_for_request(request, context_value)
success, response = await graphql(
self.schema,
data,
context_value=context_value,
root_value=self.root_value,
debug=self.debug,
logger=self.logger,
error_formatter=self.error_formatter,
extensions=extensions,
middleware=middleware,
)
status_code = 200 if success else 400
return JSONResponse(response, status_code=status_code)
|
async def graphql_http_server(self, request: Request) -> Response:
try:
data = await self.extract_data_from_request(request)
except HttpError as error:
return PlainTextResponse(error.message or error.status, status_code=400)
success, response = await self.execute_graphql_query(request, data)
status_code = 200 if success else 400
return JSONResponse(response, status_code=status_code)
|
https://github.com/mirumee/ariadne/issues/320
|
Traceback (most recent call last):",
File \"/Users/...REDACTED.../virtualenvs/graphql-Pfr5HTvn/lib/python3.7/site-packages/graphql/execution/execute.py\", line 625, in resolve_field_value_or_error",
result = resolve_fn(source, info, **args)",
File \"/Users/...REDACTED.../app/__init__.py\", line 26, in counter_resolver",
return count + 1",
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int'"
|
TypeError
|
async def handle_websocket_message(
self,
message: dict,
websocket: WebSocket,
subscriptions: Dict[str, AsyncGenerator],
):
operation_id = cast(str, message.get("id"))
message_type = cast(str, message.get("type"))
if message_type == GQL_CONNECTION_INIT:
await websocket.send_json({"type": GQL_CONNECTION_ACK})
asyncio.ensure_future(self.keep_websocket_alive(websocket))
elif message_type == GQL_CONNECTION_TERMINATE:
await websocket.close()
elif message_type == GQL_START:
await self.start_websocket_subscription(
message.get("payload"), operation_id, websocket, subscriptions
)
elif message_type == GQL_STOP:
if operation_id in subscriptions:
await subscriptions[operation_id].aclose()
del subscriptions[operation_id]
|
async def handle_websocket_message(
self,
message: dict,
websocket: WebSocket,
subscriptions: Dict[str, AsyncGenerator],
):
operation_id = cast(str, message.get("id"))
message_type = cast(str, message.get("type"))
if message_type == GQL_CONNECTION_INIT:
await websocket.send_json({"type": GQL_CONNECTION_ACK})
asyncio.ensure_future(self.keep_websocket_alive(websocket))
elif message_type == GQL_CONNECTION_TERMINATE:
await websocket.close()
elif message_type == GQL_START:
await self.process_single_message(websocket, subscriptions, message)
elif message_type == GQL_STOP:
if operation_id in subscriptions:
await subscriptions[operation_id].aclose()
del subscriptions[operation_id]
|
https://github.com/mirumee/ariadne/issues/320
|
Traceback (most recent call last):",
File \"/Users/...REDACTED.../virtualenvs/graphql-Pfr5HTvn/lib/python3.7/site-packages/graphql/execution/execute.py\", line 625, in resolve_field_value_or_error",
result = resolve_fn(source, info, **args)",
File \"/Users/...REDACTED.../app/__init__.py\", line 26, in counter_resolver",
return count + 1",
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int'"
|
TypeError
|
def conv2d_winograd_nhwc_cuda(
data, weight, strides, padding, dilation, out_dtype, pre_computed=False
):
"""Conv2D Winograd in NHWC layout.
This is a clean version to be used by the auto-scheduler for both CPU and GPU.
"""
tile_size = _infer_tile_size(data, weight, layout="NHWC")
return _conv2d_winograd_nhwc_impl(
data, weight, strides, padding, dilation, out_dtype, tile_size, pre_computed
)
|
def conv2d_winograd_nhwc_cuda(
data, weight, strides, padding, dilation, out_dtype, pre_computed=False
):
"""Conv2D Winograd in NHWC layout.
This is a clean version to be used by the auto-scheduler for both CPU and GPU.
"""
tile_size = _infer_tile_size(data, weight)
return _conv2d_winograd_nhwc_impl(
data, weight, strides, padding, dilation, out_dtype, tile_size, pre_computed
)
|
https://github.com/apache/tvm/issues/7090
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/cody-tvm/python/tvm/auto_scheduler/relay_integration.py", line 61, in call_all_topi_funcs
grc.codegen(opt_mod["main"])
File "/home/ubuntu/cody-tvm/python/tvm/relay/backend/graph_runtime_codegen.py", line 83, in codegen
self._codegen(func)
File "tvm/_ffi/_cython/./packed_func.pxi", line 321, in tvm._ffi._cy3.core.PackedFuncBase.__call__
File "tvm/_ffi/_cython/./packed_func.pxi", line 256, in tvm._ffi._cy3.core.FuncCall
File "tvm/_ffi/_cython/./packed_func.pxi", line 245, in tvm._ffi._cy3.core.FuncCall3
File "tvm/_ffi/_cython/./base.pxi", line 160, in tvm._ffi._cy3.core.CALL
tvm._ffi.base.TVMError: Traceback (most recent call last):
[bt] (8) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::backend::MemoizedExprTranslator<tvm::runtime::Array<tvm::te::Tensor, void> >::VisitExpr(tvm::RelayExpr const&)+0xa9) [0x7fce58f91e69]
[bt] (7) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)+0x82) [0x7fce58f91be2]
[bt] (6) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>::InitVTable()::{lambda(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>*)#6}::_FUN(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>*)+0x27) [0x7fce58f832e7]
[bt] (5) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::ScheduleGetter::VisitExpr_(tvm::relay::CallNode const*)+0x14f) [0x7fce58f89a8f]
[bt] (4) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::backend::MemoizedExprTranslator<tvm::runtime::Array<tvm::te::Tensor, void> >::VisitExpr(tvm::RelayExpr const&)+0xa9) [0x7fce58f91e69]
[bt] (3) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>::VisitExpr(tvm::RelayExpr const&)+0x82) [0x7fce58f91be2]
[bt] (2) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>::InitVTable()::{lambda(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>*)#6}::_FUN(tvm::runtime::ObjectRef const&, tvm::relay::ExprFunctor<tvm::runtime::Array<tvm::te::Tensor, void> (tvm::RelayExpr const&)>*)+0x27) [0x7fce58f832e7]
[bt] (1) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::ScheduleGetter::VisitExpr_(tvm::relay::CallNode const*)+0x714) [0x7fce58f8a054]
[bt] (0) /home/ubuntu/cody-tvm/build/libtvm.so(+0x1230f0b) [0x7fce5913cf0b]
File "tvm/_ffi/_cython/./packed_func.pxi", line 55, in tvm._ffi._cy3.core.tvm_callback
File "/home/ubuntu/cody-tvm/python/tvm/relay/backend/compile_engine.py", line 300, in lower_call
best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
File "/home/ubuntu/cody-tvm/python/tvm/relay/backend/compile_engine.py", line 205, in select_implementation
outs = best_plevel_impl.compute(attrs, inputs, out_type)
File "/home/ubuntu/cody-tvm/python/tvm/relay/op/op.py", line 90, in compute
return _OpImplementationCompute(self, attrs, inputs, out_type)
File "tvm/_ffi/_cython/./packed_func.pxi", line 321, in tvm._ffi._cy3.core.PackedFuncBase.__call__
File "tvm/_ffi/_cython/./packed_func.pxi", line 266, in tvm._ffi._cy3.core.FuncCall
File "tvm/_ffi/_cython/./base.pxi", line 160, in tvm._ffi._cy3.core.CALL
[bt] (3) /home/ubuntu/cody-tvm/build/libtvm.so(TVMFuncCall+0x65) [0x7fce59140485]
[bt] (2) /home/ubuntu/cody-tvm/build/libtvm.so(+0x1140ca8) [0x7fce5904cca8]
[bt] (1) /home/ubuntu/cody-tvm/build/libtvm.so(tvm::relay::OpImplementation::Compute(tvm::Attrs const&, tvm::runtime::Array<tvm::te::Tensor, void> const&, tvm::Type const&)+0xb1) [0x7fce5904ca71]
[bt] (0) /home/ubuntu/cody-tvm/build/libtvm.so(+0x1230f0b) [0x7fce5913cf0b]
File "tvm/_ffi/_cython/./packed_func.pxi", line 55, in tvm._ffi._cy3.core.tvm_callback
File "/home/ubuntu/cody-tvm/python/tvm/relay/op/strategy/generic.py", line 214, in _compute_conv2d
return [topi_compute(*args)]
File "/home/ubuntu/cody-tvm/python/tvm/topi/nn/conv2d.py", line 1191, in conv2d_winograd_nhwc_without_weight_transform
data, weight, strides, padding, dilation, out_dtype, pre_computed=True
File "<decorator-gen-57>", line 2, in conv2d_winograd_nhwc
File "/home/ubuntu/cody-tvm/python/tvm/target/generic_func.py", line 275, in dispatch_func
return dispatch_dict[k](*args, **kwargs)
File "/home/ubuntu/cody-tvm/python/tvm/topi/cuda/conv2d_winograd.py", line 373, in conv2d_winograd_nhwc_cuda
data, weight, strides, padding, dilation, out_dtype, tile_size, pre_computed
File "/home/ubuntu/cody-tvm/python/tvm/topi/nn/conv2d.py", line 1036, in _conv2d_winograd_nhwc_impl
assert HSTR == 1 and WSTR == 1 and KH == 3 and KW == 3
TVMError: AssertionError
Get devices for measurement successfully!
Traceback (most recent call last):
File "tests/python/relay/test_auto_scheduler_tuning.py", line 68, in <module>
test_tuning_cuda()
File "tests/python/relay/test_auto_scheduler_tuning.py", line 64, in test_tuning_cuda
tune_network("winograd-test", "cuda")
File "tests/python/relay/test_auto_scheduler_tuning.py", line 37, in tune_network
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
File "/home/ubuntu/cody-tvm/python/tvm/auto_scheduler/task_scheduler.py", line 234, in __init__
assert len(self.tasks) != 0, "No tasks"
AssertionError: No tasks
|
tvm._ffi.base.TVMError
|
async def _run_backfill(self) -> None:
await self._begin_backfill.wait()
if self._next_trie_root_hash is None:
raise RuntimeError(
"Cannot start backfill when a recent trie root hash is unknown"
)
loop = asyncio.get_event_loop()
while self.manager.is_running:
# Collect node hashes that might be missing; enough for a single request.
# Collect batch before asking for peer, because we don't want to hold the
# peer idle, for a long time.
required_data = await loop.run_in_executor(None, self._batch_of_missing_hashes)
if len(required_data) == 0:
# Nothing available to request, for one of two reasons:
if self._check_complete():
self.logger.info("Downloaded all accounts, storage and bytecode state")
return
else:
# There are active requests to peers, and we don't have enough information to
# ask for any more trie nodes (for example, near the beginning, when the top
# of the trie isn't available).
self.logger.debug("Backfill is waiting for more hashes to arrive")
await asyncio.sleep(PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED)
continue
await asyncio.wait(
(
self._external_peasant_usage.until_silence(),
self.manager.wait_finished(),
),
return_when=asyncio.FIRST_COMPLETED,
)
if not self.manager.is_running:
break
peer = await self._queening_queue.pop_fastest_peasant()
# skip over peer if it has an active data request
while peer.eth_api.get_node_data.is_requesting:
self.logger.debug(
"Want backfill nodes from %s, but it has an active request, skipping...",
peer,
)
# Put this peer back
self._queening_queue.insert_peer(peer, NON_IDEAL_RESPONSE_PENALTY)
# Ask for the next peer
peer = await self._queening_queue.pop_fastest_peasant()
self.manager.run_task(self._make_request, peer, required_data)
|
async def _run_backfill(self) -> None:
await self._begin_backfill.wait()
if self._next_trie_root_hash is None:
raise RuntimeError(
"Cannot start backfill when a recent trie root hash is unknown"
)
loop = asyncio.get_event_loop()
while self.manager.is_running:
# Collect node hashes that might be missing; enough for a single request.
# Collect batch before asking for peer, because we don't want to hold the
# peer idle, for a long time.
required_data = await loop.run_in_executor(None, self._batch_of_missing_hashes)
if len(required_data) == 0:
# Nothing available to request, for one of two reasons:
if self._check_complete():
self.logger.info("Downloaded all accounts, storage and bytecode state")
return
else:
# There are active requests to peers, and we don't have enough information to
# ask for any more trie nodes (for example, near the beginning, when the top
# of the trie isn't available).
self.logger.debug("Backfill is waiting for more hashes to arrive")
await asyncio.sleep(PAUSE_SECONDS_IF_STATE_BACKFILL_STARVED)
continue
await asyncio.wait(
(
self._external_peasant_usage.until_silence(),
self.manager.wait_finished(),
),
return_when=asyncio.FIRST_COMPLETED,
)
if not self.manager.is_running:
break
peer = await self._queening_queue.pop_fastest_peasant()
self.manager.run_task(self._make_request, peer, required_data)
|
https://github.com/ethereum/trinity/issues/2008
|
f2b7-40b4-a77a-6ba6d13da819>: Timed out waiting for NodeDataV65 request lock or connection: Connection-<Session <Node(0xd4062d@91.13.194.238)> 1c231c61-f2b7-40b4-a77a-6ba6d13da819>�[0m
DEBUG 2020-09-02 12:59:58,686 BeamDownloader Problem downloading nodes from peer, dropping...
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/exchange/candidate_stream.py", line 81, in payload_candidates
timeout=total_timeout * NUM_QUEUED_REQUESTS,
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 449, in wait_for
raise futures.TimeoutError()
concurrent.futures._base.TimeoutError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 681, in _request_nodes
node_hashes, timeout=self._reply_timeout)
File "/usr/src/app/trinity/trinity/protocol/eth/exchanges.py", line 110, in __call__
timeout,
File "/usr/src/app/trinity/p2p/exchange/exchange.py", line 73, in get_result
timeout,
File "/usr/src/app/trinity/p2p/exchange/manager.py", line 59, in get_result
async for payload in stream.payload_candidates(request, tracker, timeout=timeout):
File "/usr/src/app/trinity/p2p/exchange/candidate_stream.py", line 85, in payload_candidates
f"Timed out waiting for {self.response_cmd_name} request lock "
p2p.exceptions.ConnectionBusy: Timed out waiting for NodeDataV65 request lock or connection: Connection-<Session <Node(0xd4062d@91.13.194.238)> 1c231c61-f2b7-40b4-a77a-6ba6d13da819>
|
p2p.exceptions.ConnectionBusy
|
async def _match_predictive_node_requests_to_peers(self) -> None:
"""
Monitor for predictive nodes. These might be required by future blocks. They might not,
because we run a speculative execution which might follow a different code path than
the final block import does.
When predictive nodes are queued up, ask the fastest available peasant (non-queen) peer
for them. Without waiting for a response from the peer, continue and check if more
predictive trie nodes are requested. Repeat indefinitely.
"""
while self.manager.is_running:
try:
batch_id, hashes = await asyncio.wait_for(
self._maybe_useful_nodes.get(eth_constants.MAX_STATE_FETCH),
timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,
)
except asyncio.TimeoutError:
# Reduce the number of predictive peers, we seem to have plenty
if self._min_predictive_peers > 0:
self._min_predictive_peers -= 1
self.logger.debug(
"Decremented predictive peers to %d",
self._min_predictive_peers,
)
# Re-attempt
continue
try:
peer = await asyncio.wait_for(
self._queen_tracker.pop_fastest_peasant(),
timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,
)
except asyncio.TimeoutError:
# Increase the minimum number of predictive peers, we seem to not have enough
new_predictive_peers = min(
self._min_predictive_peers + 1,
# Don't reserve more than half the peers for prediction
self._num_peers // 2,
)
if new_predictive_peers != self._min_predictive_peers:
self.logger.debug(
"Updating predictive peer count from %d to %d",
self._min_predictive_peers,
new_predictive_peers,
)
self._min_predictive_peers = new_predictive_peers
cancel_attempt = True
else:
if peer.eth_api.get_node_data.is_requesting:
self.logger.debug(
"Want predictive nodes from %s, but it has an active request, skipping...",
peer,
)
self._queen_tracker.insert_peer(peer, NON_IDEAL_RESPONSE_PENALTY)
cancel_attempt = True
else:
cancel_attempt = False
if cancel_attempt:
# Prepare to restart
await self._maybe_useful_nodes.complete(batch_id, ())
continue
self._num_predictive_requests_by_peer[peer] += 1
self._predictive_requests += 1
self.manager.run_task(
self._get_predictive_nodes_from_peer,
peer,
hashes,
batch_id,
)
|
async def _match_predictive_node_requests_to_peers(self) -> None:
"""
Monitor for predictive nodes. These might be required by future blocks. They might not,
because we run a speculative execution which might follow a different code path than
the final block import does.
When predictive nodes are queued up, ask the fastest available peasant (non-queen) peer
for them. Without waiting for a response from the peer, continue and check if more
predictive trie nodes are requested. Repeat indefinitely.
"""
while self.manager.is_running:
try:
batch_id, hashes = await asyncio.wait_for(
self._maybe_useful_nodes.get(eth_constants.MAX_STATE_FETCH),
timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,
)
except asyncio.TimeoutError:
# Reduce the number of predictive peers, we seem to have plenty
if self._min_predictive_peers > 0:
self._min_predictive_peers -= 1
self.logger.debug(
"Decremented predictive peers to %d",
self._min_predictive_peers,
)
# Re-attempt
continue
try:
peer = await asyncio.wait_for(
self._queen_tracker.pop_fastest_peasant(),
timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,
)
except asyncio.TimeoutError:
# Increase the minimum number of predictive peers, we seem to not have enough
new_predictive_peers = min(
self._min_predictive_peers + 1,
# Don't reserve more than half the peers for prediction
self._num_peers // 2,
)
if new_predictive_peers != self._min_predictive_peers:
self.logger.debug(
"Updating predictive peer count from %d to %d",
self._min_predictive_peers,
new_predictive_peers,
)
self._min_predictive_peers = new_predictive_peers
# Prepare to restart
await self._maybe_useful_nodes.complete(batch_id, ())
continue
self._num_predictive_requests_by_peer[peer] += 1
self._predictive_requests += 1
self.manager.run_task(
self._get_predictive_nodes_from_peer,
peer,
hashes,
batch_id,
)
|
https://github.com/ethereum/trinity/issues/2008
|
f2b7-40b4-a77a-6ba6d13da819>: Timed out waiting for NodeDataV65 request lock or connection: Connection-<Session <Node(0xd4062d@91.13.194.238)> 1c231c61-f2b7-40b4-a77a-6ba6d13da819>�[0m
DEBUG 2020-09-02 12:59:58,686 BeamDownloader Problem downloading nodes from peer, dropping...
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/exchange/candidate_stream.py", line 81, in payload_candidates
timeout=total_timeout * NUM_QUEUED_REQUESTS,
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 449, in wait_for
raise futures.TimeoutError()
concurrent.futures._base.TimeoutError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 681, in _request_nodes
node_hashes, timeout=self._reply_timeout)
File "/usr/src/app/trinity/trinity/protocol/eth/exchanges.py", line 110, in __call__
timeout,
File "/usr/src/app/trinity/p2p/exchange/exchange.py", line 73, in get_result
timeout,
File "/usr/src/app/trinity/p2p/exchange/manager.py", line 59, in get_result
async for payload in stream.payload_candidates(request, tracker, timeout=timeout):
File "/usr/src/app/trinity/p2p/exchange/candidate_stream.py", line 85, in payload_candidates
f"Timed out waiting for {self.response_cmd_name} request lock "
p2p.exceptions.ConnectionBusy: Timed out waiting for NodeDataV65 request lock or connection: Connection-<Session <Node(0xd4062d@91.13.194.238)> 1c231c61-f2b7-40b4-a77a-6ba6d13da819>
|
p2p.exceptions.ConnectionBusy
|
async def add(self, tasks: Tuple[TTask, ...]) -> None:
"""
add() will insert as many tasks as can be inserted until the queue fills up.
Then it will pause until the queue is no longer full, and continue adding tasks.
It will finally return when all tasks have been inserted.
"""
if not isinstance(tasks, tuple):
raise ValidationError(f"must pass a tuple of tasks to add(), but got {tasks!r}")
already_pending = self._tasks.intersection(tasks)
if already_pending:
raise ValidationError(
f"Duplicate tasks detected: {already_pending!r} are already present in the queue"
)
# make sure to insert the highest-priority items first, in case queue fills up
remaining = tuple(sorted(map(self._task_wrapper, tasks)))
while remaining:
num_tasks = len(self._tasks)
if self._maxsize <= 0:
# no cap at all, immediately insert all tasks
open_slots = len(remaining)
elif num_tasks < self._maxsize:
# there is room to add at least one more task
open_slots = self._maxsize - num_tasks
else:
# wait until there is room in the queue
await self._full_lock.acquire()
# the current number of tasks has changed, restart attempt
continue
queueing, remaining = remaining[:open_slots], remaining[open_slots:]
for task in queueing:
await self._open_queue.put(task)
original_queued = tuple(task.original for task in queueing)
self._tasks.update(original_queued)
if self._full_lock.locked() and len(self._tasks) < self._maxsize:
self._full_lock.release()
|
async def add(self, tasks: Tuple[TTask, ...]) -> None:
"""
add() will insert as many tasks as can be inserted until the queue fills up.
Then it will pause until the queue is no longer full, and continue adding tasks.
It will finally return when all tasks have been inserted.
"""
if not isinstance(tasks, tuple):
raise ValidationError(f"must pass a tuple of tasks to add(), but got {tasks!r}")
already_pending = self._tasks.intersection(tasks)
if already_pending:
raise ValidationError(
f"Duplicate tasks detected: {already_pending!r} are already present in the queue"
)
# make sure to insert the highest-priority items first, in case queue fills up
remaining = tuple(sorted(map(self._task_wrapper, tasks)))
while remaining:
num_tasks = len(self._tasks)
if self._maxsize <= 0:
# no cap at all, immediately insert all tasks
open_slots = len(remaining)
elif num_tasks < self._maxsize:
# there is room to add at least one more task
open_slots = self._maxsize - num_tasks
else:
# wait until there is room in the queue
await self._full_lock.acquire()
# the current number of tasks has changed, restart attempt
continue
queueing, remaining = remaining[:open_slots], remaining[open_slots:]
for task in queueing:
# There will always be room in _open_queue until _maxsize is reached
try:
self._open_queue.put_nowait(task)
except asyncio.QueueFull as exc:
task_idx = queueing.index(task)
qsize = self._open_queue.qsize()
raise asyncio.QueueFull(
f"TaskQueue unsuccessful in adding task {task.original!r} ",
f"because qsize={qsize}, "
f"num_tasks={num_tasks}, maxsize={self._maxsize}, open_slots={open_slots}, "
f"num queueing={len(queueing)}, len(_tasks)={len(self._tasks)}, task_idx="
f"{task_idx}, queuing={queueing}, original msg: {exc}",
)
original_queued = tuple(task.original for task in queueing)
self._tasks.update(original_queued)
if self._full_lock.locked() and len(self._tasks) < self._maxsize:
self._full_lock.release()
|
https://github.com/ethereum/trinity/issues/2027
|
DEBUG 2020-09-03 21:51:33,790 SkeletonSyncer Skeleton sync with ETHPeer (eth, 63) <Session <Node(0x1fbb02@34.239.157.14)> a55e806f-fd96-4963-8e5f-73243d2a6820> ended
DEBUG 2020-09-03 21:51:33,791 SkeletonSyncer Skeleton syncer had 0 pending headers when it was cancelled
DEBUG 2020-09-03 21:51:33,789 MuirGlacierVM Beam pivot over 1 txn preview for <BlockHeader #10790870 55f0d7cd> b/c StateUnretrievable('No servers for CollectMissingAccount') after 0.0s, %exec 100, stats: BeamStat: accts=0, a_nodes=0, codes=0, strg=0, s_nodes=0, nodes=0, rtt=0.000s, wait=0s
�[1m�[31m ERROR 2020-09-03 21:51:33,796 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa4481cd710>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa4481cd710>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 122, in _handle_cancelled
await self._real_handle_cancelled()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 173, in _real_handle_cancelled
await asyncio_task
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: QueueFull("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa4281be9d0>,), original msg: '), QueueFull("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa37b28cd10>,), original msg: ')
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 181, in add
self._open_queue.put_nowait(task)
File "/usr/local/lib/python3.7/asyncio/queues.py", line 144, in put_nowait
raise QueueFull
asyncio.queues.QueueFull
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1002, in _hang_until_storage_served
await self._serve_storage(event)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1034, in _serve_storage
event.urgent,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 368, in download_storage
await self.ensure_nodes_present(need_nodes, urgent)
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 179, in ensure_nodes_present
BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 221, in _wait_for_nodes
await queue.add(unrequested_nodes)
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 187, in add
f'because qsize={qsize}, '
asyncio.queues.QueueFull: ("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa4281be9d0>,), original msg: ')
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 181, in add
self._open_queue.put_nowait(task)
File "/usr/local/lib/python3.7/asyncio/queues.py", line 144, in put_nowait
raise QueueFull
asyncio.queues.QueueFull
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1002, in _hang_until_storage_served
await self._serve_storage(event)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1034, in _serve_storage
event.urgent,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 368, in download_storage
await self.ensure_nodes_present(need_nodes, urgent)
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 179, in ensure_nodes_present
BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 221, in _wait_for_nodes
await queue.add(unrequested_nodes)
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 187, in add
f'because qsize={qsize}, '
asyncio.queues.QueueFull: ("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa37b28cd10>,), original msg: ')�[0m
�[1m�[31m ERROR 2020-09-03 21:51:33,798 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa35dc06d50>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa35dc06d50>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 234, in run
await self._wait_all_tasks_done()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 195, in _wait_all_tasks_done
await task
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 122, in _handle_cancelled
await self._real_handle_cancelled()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 173, in _real_handle_cancelled
await asyncio_task
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: QueueFull("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa4281be9d0>,), original msg: '), QueueFull("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa37b28cd10>,), original msg: ')
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 181, in add
self._open_queue.put_nowait(task)
File "/usr/local/lib/python3.7/asyncio/queues.py", line 144, in put_nowait
raise QueueFull
asyncio.queues.QueueFull
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1002, in _hang_until_storage_served
await self._serve_storage(event)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1034, in _serve_storage
event.urgent,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 368, in download_storage
await self.ensure_nodes_present(need_nodes, urgent)
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 179, in ensure_nodes_present
BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 221, in _wait_for_nodes
await queue.add(unrequested_nodes)
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 187, in add
f'because qsize={qsize}, '
asyncio.queues.QueueFull: ("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa4281be9d0>,), original msg: ')
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 181, in add
self._open_queue.put_nowait(task)
File "/usr/local/lib/python3.7/asyncio/queues.py", line 144, in put_nowait
raise QueueFull
asyncio.queues.QueueFull
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1002, in _hang_until_storage_served
await self._serve_storage(event)
File "/usr/src/app/trinity/trinity/sync/beam/chain.py", line 1034, in _serve_storage
event.urgent,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 368, in download_storage
await self.ensure_nodes_present(need_nodes, urgent)
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 179, in ensure_nodes_present
BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
File "/usr/src/app/trinity/trinity/sync/beam/state.py", line 221, in _wait_for_nodes
await queue.add(unrequested_nodes)
File "/usr/src/app/trinity/trinity/_utils/datastructures.py", line 187, in add
f'because qsize={qsize}, '
asyncio.queues.QueueFull: ("TaskQueue unsuccessful in adding task HexBytes('0xc20b1fd0d561fd36fc89a13e3c43454c7056ac697ffacd1443a0b2cec5a1a365') ", 'because qsize=6144, num_tasks=6114, maxsize=6144, open_slots=30, num queueing=1, len(_tasks)=6114, task_idx=0, queuing=(<trinity._utils.datastructures.PredefinedSortableTask object at 0x7fa37b28cd10>,), original msg: ')�[0m
DEBUG 2020-09-03 21:51:33,798 MuirGlacierVM Beam pivot over 1 txn preview for <BlockHeader #10790870 55f0d7cd> b/c StateUnretrievable('No servers for CollectMissingAccount') after 0.0s, %exec 100, stats: BeamStat: accts=0, a_nodes=0, codes=0, strg=0, s_nodes=0, nodes=0, rtt=0.000s, wait=0s
|
trio.MultiError
|
async def disconnect(self, reason: DisconnectReason) -> None:
"""
Send a Disconnect msg to the remote peer and stop ourselves.
"""
self.disconnect_nowait(reason)
await self.manager.stop()
|
async def disconnect(self, reason: DisconnectReason) -> None:
"""
On completion of this method, the peer will be disconnected
and not in the peer pool anymore.
"""
self.disconnect_nowait(reason)
await self.manager.stop()
|
https://github.com/ethereum/trinity/issues/2033
|
�[1m�[31m ERROR 2020-09-10 09:35:40,267 ETHPeerPool unexpected error during peer connection
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/protocol/common/peer.py", line 228, in maybe_connect_more_peers
for backend in self.peer_backends
File "/usr/src/app/trinity/p2p/peer_pool.py", line 223, in _add_peers_from_backend
await self.connect_to_nodes(candidates)
File "/usr/src/app/trinity/p2p/peer_pool.py", line 452, in connect_to_nodes
await asyncio.gather(*(self.connect_to_node(node) for node in batch))
File "/usr/src/app/trinity/p2p/peer_pool.py", line 498, in connect_to_node
await peer.disconnect(DisconnectReason.TOO_MANY_PEERS)
File "/usr/src/app/trinity/p2p/peer.py", line 338, in disconnect
self.disconnect_nowait(reason)
File "/usr/src/app/trinity/p2p/peer.py", line 349, in disconnect_nowait
self._send_disconnect(reason)
File "/usr/src/app/trinity/p2p/peer.py", line 353, in _send_disconnect
self._p2p_api.disconnect(reason)
AttributeError: 'ETHPeer' object has no attribute '_p2p_api'�[0m
|
AttributeError
|
async def add_outbound_peer(self, peer: BasePeer) -> None:
try:
await self._start_peer(peer)
except asyncio.TimeoutError as err:
self.logger.debug("Timeout waiting for %s to start: %s", peer, err)
return
# Check again to see if we have *become* full since the previous check.
if self.is_full:
self.logger.debug(
"Successfully connected to %s but peer pool is full. Disconnecting.",
peer,
)
await peer.disconnect(DisconnectReason.TOO_MANY_PEERS)
return
elif not self.manager.is_running:
self.logger.debug(
"Successfully connected to %s but peer pool is closing. Disconnecting.",
peer,
)
await peer.disconnect(DisconnectReason.CLIENT_QUITTING)
return
await self._add_peer_and_bootstrap(peer)
|
async def add_outbound_peer(self, peer: BasePeer) -> None:
try:
await self._start_peer(peer)
except asyncio.TimeoutError as err:
self.logger.debug("Timeout waiting for %s to start: %s", peer, err)
return
await self._add_peer_and_bootstrap(peer)
|
https://github.com/ethereum/trinity/issues/2033
|
�[1m�[31m ERROR 2020-09-10 09:35:40,267 ETHPeerPool unexpected error during peer connection
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/protocol/common/peer.py", line 228, in maybe_connect_more_peers
for backend in self.peer_backends
File "/usr/src/app/trinity/p2p/peer_pool.py", line 223, in _add_peers_from_backend
await self.connect_to_nodes(candidates)
File "/usr/src/app/trinity/p2p/peer_pool.py", line 452, in connect_to_nodes
await asyncio.gather(*(self.connect_to_node(node) for node in batch))
File "/usr/src/app/trinity/p2p/peer_pool.py", line 498, in connect_to_node
await peer.disconnect(DisconnectReason.TOO_MANY_PEERS)
File "/usr/src/app/trinity/p2p/peer.py", line 338, in disconnect
self.disconnect_nowait(reason)
File "/usr/src/app/trinity/p2p/peer.py", line 349, in disconnect_nowait
self._send_disconnect(reason)
File "/usr/src/app/trinity/p2p/peer.py", line 353, in _send_disconnect
self._p2p_api.disconnect(reason)
AttributeError: 'ETHPeer' object has no attribute '_p2p_api'�[0m
|
AttributeError
|
async def connect_to_node(self, node: NodeAPI) -> None:
"""
Connect to a single node quietly aborting if the peer pool is full or
shutting down, or one of the expected peer level exceptions is raised
while connecting.
"""
if self.is_full or not self.manager.is_running:
self.logger.warning(
"Asked to connect to node when either full or not operational"
)
return
if self._handshake_locks.is_locked(node):
self.logger.info(
"Asked to connect to node when handshake lock is already locked, will wait"
)
async with self.lock_node_for_handshake(node):
if self.is_connected_to_node(node):
self.logger.debug(
"Aborting outbound connection attempt to %s. Already connected!",
node,
)
return
try:
async with self._connection_attempt_lock:
peer = await self.connect(node)
except ALLOWED_PEER_CONNECTION_EXCEPTIONS:
return
await self.add_outbound_peer(peer)
|
async def connect_to_node(self, node: NodeAPI) -> None:
"""
Connect to a single node quietly aborting if the peer pool is full or
shutting down, or one of the expected peer level exceptions is raised
while connecting.
"""
if self.is_full or not self.manager.is_running:
self.logger.warning(
"Asked to connect to node when either full or not operational"
)
return
if self._handshake_locks.is_locked(node):
self.logger.info(
"Asked to connect to node when handshake lock is already locked, will wait"
)
async with self.lock_node_for_handshake(node):
if self.is_connected_to_node(node):
self.logger.debug(
"Aborting outbound connection attempt to %s. Already connected!",
node,
)
return
try:
async with self._connection_attempt_lock:
peer = await self.connect(node)
except ALLOWED_PEER_CONNECTION_EXCEPTIONS:
return
# Check again to see if we have *become* full since the previous
# check.
if self.is_full:
self.logger.debug(
"Successfully connected to %s but peer pool is full. Disconnecting.",
peer,
)
await peer.disconnect(DisconnectReason.TOO_MANY_PEERS)
return
elif not self.manager.is_running:
self.logger.debug(
"Successfully connected to %s but peer pool is closing. Disconnecting.",
peer,
)
await peer.disconnect(DisconnectReason.CLIENT_QUITTING)
return
else:
await self.add_outbound_peer(peer)
|
https://github.com/ethereum/trinity/issues/2033
|
�[1m�[31m ERROR 2020-09-10 09:35:40,267 ETHPeerPool unexpected error during peer connection
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/protocol/common/peer.py", line 228, in maybe_connect_more_peers
for backend in self.peer_backends
File "/usr/src/app/trinity/p2p/peer_pool.py", line 223, in _add_peers_from_backend
await self.connect_to_nodes(candidates)
File "/usr/src/app/trinity/p2p/peer_pool.py", line 452, in connect_to_nodes
await asyncio.gather(*(self.connect_to_node(node) for node in batch))
File "/usr/src/app/trinity/p2p/peer_pool.py", line 498, in connect_to_node
await peer.disconnect(DisconnectReason.TOO_MANY_PEERS)
File "/usr/src/app/trinity/p2p/peer.py", line 338, in disconnect
self.disconnect_nowait(reason)
File "/usr/src/app/trinity/p2p/peer.py", line 349, in disconnect_nowait
self._send_disconnect(reason)
File "/usr/src/app/trinity/p2p/peer.py", line 353, in _send_disconnect
self._p2p_api.disconnect(reason)
AttributeError: 'ETHPeer' object has no attribute '_p2p_api'�[0m
|
AttributeError
|
async def run_behaviors(self, behaviors: Tuple[BehaviorAPI, ...]) -> None:
async with contextlib.AsyncExitStack() as stack:
futures: List[asyncio.Task[Any]] = [
create_task(
self.manager.wait_finished(), "Connection/run_behaviors/wait_finished"
)
]
for behavior in behaviors:
if behavior.should_apply_to(self):
behavior_exit = await stack.enter_async_context(behavior.apply(self))
futures.append(behavior_exit)
self.behaviors_applied.set()
# If wait_first() is called, cleanup_tasks() will be a no-op, but if any post_apply()
# calls raise an exception, it will ensure we don't leak pending tasks that would
# cause asyncio to complain.
async with cleanup_tasks(*futures):
try:
for behavior in behaviors:
behavior.post_apply()
await wait_first(futures, max_wait_after_cancellation=2)
except asyncio.TimeoutError:
self.logger.warning(
"Timed out waiting for tasks to terminate after cancellation: %s",
futures,
)
except PeerConnectionLost:
# Any of our behaviors may propagate a PeerConnectionLost, which is to be
# expected as many Connection APIs used by them can raise that. To avoid a
# DaemonTaskExit since we're returning silently, ensure we're cancelled.
pass
finally:
self.manager.cancel()
|
async def run_behaviors(self, behaviors: Tuple[BehaviorAPI, ...]) -> None:
async with contextlib.AsyncExitStack() as stack:
futures: List[asyncio.Task[Any]] = [
create_task(
self.manager.wait_finished(), "Connection/run_behaviors/wait_finished"
)
]
for behavior in behaviors:
if behavior.should_apply_to(self):
behavior_exit = await stack.enter_async_context(behavior.apply(self))
futures.append(behavior_exit)
self.behaviors_applied.set()
# If wait_first() is called, cleanup_tasks() will be a no-op, but if any post_apply()
# calls raise an exception, it will ensure we don't leak pending tasks that would
# cause asyncio to complain.
async with cleanup_tasks(*futures):
try:
for behavior in behaviors:
behavior.post_apply()
await wait_first(futures, max_wait_after_cancellation=2)
except PeerConnectionLost:
# Any of our behaviors may propagate a PeerConnectionLost, which is to be
# expected as many Connection APIs used by them can raise that. To avoid a
# DaemonTaskExit since we're returning silently, ensure we're cancelled.
self.manager.cancel()
|
https://github.com/ethereum/trinity/issues/2018
|
�[1m�[33m WARNING 2020-09-04 04:19:18,179 Sync / PeerPool Event loop blocked or overloaded: delay=2.005s, tasks=74�[0m
�[1m�[31m ERROR 2020-09-04 04:19:18,181 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa47b456e90>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa47b456e90>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>}), TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})�[0m
|
trio.MultiError
|
async def do_run(self, event_bus: EndpointAPI) -> None:
boot_info = self._boot_info
if boot_info.args.enable_metrics:
metrics_service = metrics_service_from_args(
boot_info.args, AsyncioMetricsService
)
else:
# Use a NoopMetricsService so that no code branches need to be taken if metrics
# are disabled
metrics_service = NOOP_METRICS_SERVICE
trinity_config = boot_info.trinity_config
NodeClass = trinity_config.get_app_config(Eth1AppConfig).node_class
node = NodeClass(event_bus, metrics_service, trinity_config)
strategy = self.get_active_strategy(boot_info)
async with background_asyncio_service(node) as node_manager:
sync_task = create_task(
self.launch_sync(node, strategy, boot_info, event_bus), self.name
)
# The Node service is our responsibility, so we must exit if either that or the syncer
# returns.
node_manager_task = create_task(
node_manager.wait_finished(), f"{NodeClass.__name__} wait_finished() task"
)
tasks = [sync_task, node_manager_task]
try:
await wait_first(tasks, max_wait_after_cancellation=2)
except asyncio.TimeoutError:
self.logger.warning(
"Timed out waiting for tasks to terminate after cancellation: %s", tasks
)
|
async def do_run(self, event_bus: EndpointAPI) -> None:
boot_info = self._boot_info
if boot_info.args.enable_metrics:
metrics_service = metrics_service_from_args(
boot_info.args, AsyncioMetricsService
)
else:
# Use a NoopMetricsService so that no code branches need to be taken if metrics
# are disabled
metrics_service = NOOP_METRICS_SERVICE
trinity_config = boot_info.trinity_config
NodeClass = trinity_config.get_app_config(Eth1AppConfig).node_class
node = NodeClass(event_bus, metrics_service, trinity_config)
strategy = self.get_active_strategy(boot_info)
async with background_asyncio_service(node) as node_manager:
sync_task = create_task(
self.launch_sync(node, strategy, boot_info, event_bus), self.name
)
# The Node service is our responsibility, so we must exit if either that or the syncer
# returns.
node_manager_task = create_task(
node_manager.wait_finished(), f"{NodeClass.__name__} wait_finished() task"
)
await wait_first([sync_task, node_manager_task], max_wait_after_cancellation=2)
|
https://github.com/ethereum/trinity/issues/2018
|
�[1m�[33m WARNING 2020-09-04 04:19:18,179 Sync / PeerPool Event loop blocked or overloaded: delay=2.005s, tasks=74�[0m
�[1m�[31m ERROR 2020-09-04 04:19:18,181 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa47b456e90>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa47b456e90>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>}), TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})�[0m
|
trio.MultiError
|
async def _do_run(self) -> None:
with child_process_logging(self._boot_info):
endpoint_name = self.get_endpoint_name()
event_bus_service = AsyncioEventBusService(
self._boot_info.trinity_config,
endpoint_name,
)
async with background_asyncio_service(event_bus_service) as eventbus_manager:
event_bus = await event_bus_service.get_event_bus()
loop_monitoring_task = create_task(
self._loop_monitoring_task(event_bus),
f"AsyncioIsolatedComponent/{self.name}/loop_monitoring_task",
)
do_run_task = create_task(
self.do_run(event_bus), f"AsyncioIsolatedComponent/{self.name}/do_run"
)
eventbus_task = create_task(
eventbus_manager.wait_finished(),
f"AsyncioIsolatedComponent/{self.name}/eventbus/wait_finished",
)
try:
max_wait_after_cancellation = 2
tasks = [do_run_task, eventbus_task, loop_monitoring_task]
if self._boot_info.profile:
with profiler(f"profile_{self.get_endpoint_name()}"):
try:
await wait_first(
tasks,
max_wait_after_cancellation,
)
except asyncio.TimeoutError:
self.logger.warning(
"Timed out waiting for tasks to "
"terminate after cancellation: %s",
tasks,
)
else:
# XXX: When open_in_process() injects a KeyboardInterrupt into us (via
# coro.throw()), we hang forever here, until open_in_process() times
# out and sends us a SIGTERM, at which point we exit without executing
# either the except or the finally blocks below.
# See https://github.com/ethereum/trinity/issues/1711 for more.
try:
await wait_first(
tasks,
max_wait_after_cancellation,
)
except asyncio.TimeoutError:
self.logger.warning(
"Timed out waiting for tasks to terminate after cancellation: %s",
tasks,
)
except KeyboardInterrupt:
self.logger.debug("%s: KeyboardInterrupt", self)
# Currently we never reach this code path, but when we fix the issue above
# it will be needed.
return
finally:
# Once we start seeing this in the logs after a Ctrl-C, we'll likely have
# figured out the issue above.
self.logger.debug("%s: do_run() finished", self)
|
async def _do_run(self) -> None:
with child_process_logging(self._boot_info):
endpoint_name = self.get_endpoint_name()
event_bus_service = AsyncioEventBusService(
self._boot_info.trinity_config,
endpoint_name,
)
async with background_asyncio_service(event_bus_service) as eventbus_manager:
event_bus = await event_bus_service.get_event_bus()
loop_monitoring_task = create_task(
self._loop_monitoring_task(event_bus),
f"AsyncioIsolatedComponent/{self.name}/loop_monitoring_task",
)
do_run_task = create_task(
self.do_run(event_bus), f"AsyncioIsolatedComponent/{self.name}/do_run"
)
eventbus_task = create_task(
eventbus_manager.wait_finished(),
f"AsyncioIsolatedComponent/{self.name}/eventbus/wait_finished",
)
try:
max_wait_after_cancellation = 2
if self._boot_info.profile:
with profiler(f"profile_{self.get_endpoint_name()}"):
await wait_first(
[do_run_task, eventbus_task, loop_monitoring_task],
max_wait_after_cancellation,
)
else:
# XXX: When open_in_process() injects a KeyboardInterrupt into us (via
# coro.throw()), we hang forever here, until open_in_process() times
# out and sends us a SIGTERM, at which point we exit without executing
# either the except or the finally blocks below.
# See https://github.com/ethereum/trinity/issues/1711 for more.
await wait_first(
[do_run_task, eventbus_task, loop_monitoring_task],
max_wait_after_cancellation,
)
except KeyboardInterrupt:
self.logger.debug("%s: KeyboardInterrupt", self)
# Currently we never reach this code path, but when we fix the issue above
# it will be needed.
return
finally:
# Once we start seeing this in the logs after a Ctrl-C, we'll likely have
# figured out the issue above.
self.logger.debug("%s: do_run() finished", self)
|
https://github.com/ethereum/trinity/issues/2018
|
�[1m�[33m WARNING 2020-09-04 04:19:18,179 Sync / PeerPool Event loop blocked or overloaded: delay=2.005s, tasks=74�[0m
�[1m�[31m ERROR 2020-09-04 04:19:18,181 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa47b456e90>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa47b456e90>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>}), TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})�[0m
|
trio.MultiError
|
def run_asyncio_eth1_component(
component_type: Type["AsyncioIsolatedComponent"],
) -> None:
import asyncio
from p2p.asyncio_utils import wait_first
loop = asyncio.get_event_loop()
got_sigint = asyncio.Event()
loop.add_signal_handler(signal.SIGINT, got_sigint.set)
loop.add_signal_handler(signal.SIGTERM, got_sigint.set)
async def run() -> None:
component, connect_to_endpoints = _setup_standalone_component(
component_type, APP_IDENTIFIER_ETH1
)
async with _run_eventbus_for_component(
component, connect_to_endpoints
) as event_bus:
async with _run_asyncio_component_in_proc(
component, event_bus
) as component_task:
sigint_task = asyncio.create_task(got_sigint.wait())
tasks = [component_task, sigint_task]
try:
await wait_first(tasks, max_wait_after_cancellation=2)
except asyncio.TimeoutError:
logger.warning(
"Timed out waiting for tasks to terminate after cancellation: %s",
tasks,
)
loop.run_until_complete(run())
|
def run_asyncio_eth1_component(
component_type: Type["AsyncioIsolatedComponent"],
) -> None:
import asyncio
from p2p.asyncio_utils import wait_first
loop = asyncio.get_event_loop()
got_sigint = asyncio.Event()
loop.add_signal_handler(signal.SIGINT, got_sigint.set)
loop.add_signal_handler(signal.SIGTERM, got_sigint.set)
async def run() -> None:
component, connect_to_endpoints = _setup_standalone_component(
component_type, APP_IDENTIFIER_ETH1
)
async with _run_eventbus_for_component(
component, connect_to_endpoints
) as event_bus:
async with _run_asyncio_component_in_proc(
component, event_bus
) as component_task:
sigint_task = asyncio.create_task(got_sigint.wait())
await wait_first(
[component_task, sigint_task], max_wait_after_cancellation=2
)
loop.run_until_complete(run())
|
https://github.com/ethereum/trinity/issues/2018
|
�[1m�[33m WARNING 2020-09-04 04:19:18,179 Sync / PeerPool Event loop blocked or overloaded: delay=2.005s, tasks=74�[0m
�[1m�[31m ERROR 2020-09-04 04:19:18,181 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa47b456e90>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa47b456e90>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>}), TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})�[0m
|
trio.MultiError
|
async def run() -> None:
component, connect_to_endpoints = _setup_standalone_component(
component_type, APP_IDENTIFIER_ETH1
)
async with _run_eventbus_for_component(
component, connect_to_endpoints
) as event_bus:
async with _run_asyncio_component_in_proc(
component, event_bus
) as component_task:
sigint_task = asyncio.create_task(got_sigint.wait())
tasks = [component_task, sigint_task]
try:
await wait_first(tasks, max_wait_after_cancellation=2)
except asyncio.TimeoutError:
logger.warning(
"Timed out waiting for tasks to terminate after cancellation: %s",
tasks,
)
|
async def run() -> None:
component, connect_to_endpoints = _setup_standalone_component(
component_type, APP_IDENTIFIER_ETH1
)
async with _run_eventbus_for_component(
component, connect_to_endpoints
) as event_bus:
async with _run_asyncio_component_in_proc(
component, event_bus
) as component_task:
sigint_task = asyncio.create_task(got_sigint.wait())
await wait_first(
[component_task, sigint_task], max_wait_after_cancellation=2
)
|
https://github.com/ethereum/trinity/issues/2018
|
�[1m�[33m WARNING 2020-09-04 04:19:18,179 Sync / PeerPool Event loop blocked or overloaded: delay=2.005s, tasks=74�[0m
�[1m�[31m ERROR 2020-09-04 04:19:18,181 asyncio Exception in callback <TaskWakeupMethWrapper object at 0x7fa47b456e90>
handle: <Handle <TaskWakeupMethWrapper object at 0x7fa47b456e90>>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 70, in uvloop.loop.Handle._run
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
trio.MultiError: TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>}), TimeoutError('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 1:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})
Details of embedded exception 2:
Traceback (most recent call last):
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 82, in cancel_pending_tasks
yield
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 54, in wait_first
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 389, in wait
return await _wait(fs, timeout, return_when, loop)
File "/usr/local/lib/python3.7/asyncio/tasks.py", line 482, in _wait
await waiter
concurrent.futures._base.CancelledError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 165, in run
await self.child_manager.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 246, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/asyncio.py", line 35, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/p2p/connection.py", line 133, in run_behaviors
await wait_first(futures, max_wait_after_cancellation=2)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 69, in wait_first
raise done_task.exception()
File "/usr/local/lib/python3.7/contextlib.py", line 188, in __aexit__
await self.gen.athrow(typ, value, traceback)
File "/usr/src/app/trinity/p2p/asyncio_utils.py", line 113, in cancel_pending_tasks
raise MultiError(errors)
concurrent.futures._base.TimeoutError: ('Tasks never returned after being cancelled: %s', {<Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>, <Task cancelled coro=<wait_first() done, defined at /usr/src/app/trinity/p2p/asyncio_utils.py:31>>})�[0m
|
trio.MultiError
|
async def run_background_asyncio_services(services: Sequence[ServiceAPI]) -> None:
async with contextlib.AsyncExitStack() as stack:
managers = tuple(
[
await stack.enter_async_context(background_asyncio_service(service))
for service in services
]
)
# If any of the services terminate, we do so as well.
await wait_first_asyncio(
[asyncio.create_task(manager.wait_finished()) for manager in managers]
)
|
async def run_background_asyncio_services(services: Sequence[ServiceAPI]) -> None:
await _run_background_services(services, background_asyncio_service)
|
https://github.com/ethereum/trinity/issues/1903
|
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/extensibility/trio.py", line 50, in run_process
await self.do_run(event_bus)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/component.py", line 171, in do_run
await managers[0].wait_finished()
File "/usr/local/lib/python3.7/contextlib.py", line 678, in __aexit__
raise exc_details[1]
File "/usr/local/lib/python3.7/contextlib.py", line 661, in __aexit__
cb_suppress = await cb(*exc_details)
File "/usr/local/lib/python3.7/contextlib.py", line 552, in _exit_wrapper
return await cm_exit(cm, exc_type, exc, tb)
File "/usr/local/lib/python3.7/site-packages/async_generator/_util.py", line 53, in __aexit__
await self._agen.athrow(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 411, in background_trio_service
await manager.stop()
File "/usr/local/lib/python3.7/site-packages/trio/_core/_run.py", line 730, in __aexit__
raise combined_error_from_nursery
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 208, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 76, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/service/trio.py", line 10, in continuously_report
self._reporter.report_now()
File "/usr/local/lib/python3.7/site-packages/pyformance/reporters/influx.py", line 88, in report_now
response = urlopen(request)
File "/usr/local/lib/python3.7/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.7/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/local/lib/python3.7/urllib/request.py", line 543, in _open
'_open', req)
File "/usr/local/lib/python3.7/urllib/request.py", line 503, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.7/urllib/request.py", line 1345, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/local/lib/python3.7/urllib/request.py", line 1320, in do_open
r = h.getresponse()
File "/usr/local/lib/python3.7/http/client.py", line 1336, in getresponse
response.begin()
File "/usr/local/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/lib/python3.7/http/client.py", line 275, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
|
combined_error
|
async def run_background_trio_services(services: Sequence[ServiceAPI]) -> None:
async with contextlib.AsyncExitStack() as stack:
managers = tuple(
[
await stack.enter_async_context(background_trio_service(service))
for service in services
]
)
# If any of the services terminate, we do so as well.
await wait_first_trio([manager.wait_finished for manager in managers])
|
async def run_background_trio_services(services: Sequence[ServiceAPI]) -> None:
await _run_background_services(services, background_trio_service)
|
https://github.com/ethereum/trinity/issues/1903
|
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/extensibility/trio.py", line 50, in run_process
await self.do_run(event_bus)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/component.py", line 171, in do_run
await managers[0].wait_finished()
File "/usr/local/lib/python3.7/contextlib.py", line 678, in __aexit__
raise exc_details[1]
File "/usr/local/lib/python3.7/contextlib.py", line 661, in __aexit__
cb_suppress = await cb(*exc_details)
File "/usr/local/lib/python3.7/contextlib.py", line 552, in _exit_wrapper
return await cm_exit(cm, exc_type, exc, tb)
File "/usr/local/lib/python3.7/site-packages/async_generator/_util.py", line 53, in __aexit__
await self._agen.athrow(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 411, in background_trio_service
await manager.stop()
File "/usr/local/lib/python3.7/site-packages/trio/_core/_run.py", line 730, in __aexit__
raise combined_error_from_nursery
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 208, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 76, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/service/trio.py", line 10, in continuously_report
self._reporter.report_now()
File "/usr/local/lib/python3.7/site-packages/pyformance/reporters/influx.py", line 88, in report_now
response = urlopen(request)
File "/usr/local/lib/python3.7/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.7/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/local/lib/python3.7/urllib/request.py", line 543, in _open
'_open', req)
File "/usr/local/lib/python3.7/urllib/request.py", line 503, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.7/urllib/request.py", line 1345, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/local/lib/python3.7/urllib/request.py", line 1320, in do_open
r = h.getresponse()
File "/usr/local/lib/python3.7/http/client.py", line 1336, in getresponse
response.begin()
File "/usr/local/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/lib/python3.7/http/client.py", line 275, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
|
combined_error
|
async def continuously_report(self) -> None:
while self.manager.is_running:
super().report_now()
await asyncio.sleep(self._reporting_frequency)
|
async def continuously_report(self) -> None:
while self.manager.is_running:
self._reporter.report_now()
await asyncio.sleep(self._reporting_frequency)
|
https://github.com/ethereum/trinity/issues/1903
|
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/extensibility/trio.py", line 50, in run_process
await self.do_run(event_bus)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/component.py", line 171, in do_run
await managers[0].wait_finished()
File "/usr/local/lib/python3.7/contextlib.py", line 678, in __aexit__
raise exc_details[1]
File "/usr/local/lib/python3.7/contextlib.py", line 661, in __aexit__
cb_suppress = await cb(*exc_details)
File "/usr/local/lib/python3.7/contextlib.py", line 552, in _exit_wrapper
return await cm_exit(cm, exc_type, exc, tb)
File "/usr/local/lib/python3.7/site-packages/async_generator/_util.py", line 53, in __aexit__
await self._agen.athrow(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 411, in background_trio_service
await manager.stop()
File "/usr/local/lib/python3.7/site-packages/trio/_core/_run.py", line 730, in __aexit__
raise combined_error_from_nursery
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 208, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 76, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/service/trio.py", line 10, in continuously_report
self._reporter.report_now()
File "/usr/local/lib/python3.7/site-packages/pyformance/reporters/influx.py", line 88, in report_now
response = urlopen(request)
File "/usr/local/lib/python3.7/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.7/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/local/lib/python3.7/urllib/request.py", line 543, in _open
'_open', req)
File "/usr/local/lib/python3.7/urllib/request.py", line 503, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.7/urllib/request.py", line 1345, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/local/lib/python3.7/urllib/request.py", line 1320, in do_open
r = h.getresponse()
File "/usr/local/lib/python3.7/http/client.py", line 1336, in getresponse
response.begin()
File "/usr/local/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/lib/python3.7/http/client.py", line 275, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
|
combined_error
|
def __init__(
self,
influx_server: str,
influx_user: str,
influx_password: str,
influx_database: str,
host: str,
port: int,
protocol: str,
reporting_frequency: int,
):
self._unreported_error: Exception = None
self._last_time_reported: float = 0.0
self._influx_server = influx_server
self._reporting_frequency = reporting_frequency
self._registry = HostMetricsRegistry(host)
self._reporter = InfluxReporter(
registry=self._registry,
database=influx_database,
username=influx_user,
password=influx_password,
protocol=protocol,
port=port,
server=influx_server,
)
|
def __init__(
self,
influx_server: str,
influx_user: str,
influx_password: str,
influx_database: str,
host: str,
port: int,
protocol: str,
reporting_frequency: int,
):
self._influx_server = influx_server
self._reporting_frequency = reporting_frequency
self._registry = HostMetricsRegistry(host)
self._reporter = InfluxReporter(
registry=self._registry,
database=influx_database,
username=influx_user,
password=influx_password,
protocol=protocol,
port=port,
server=influx_server,
)
|
https://github.com/ethereum/trinity/issues/1903
|
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/extensibility/trio.py", line 50, in run_process
await self.do_run(event_bus)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/component.py", line 171, in do_run
await managers[0].wait_finished()
File "/usr/local/lib/python3.7/contextlib.py", line 678, in __aexit__
raise exc_details[1]
File "/usr/local/lib/python3.7/contextlib.py", line 661, in __aexit__
cb_suppress = await cb(*exc_details)
File "/usr/local/lib/python3.7/contextlib.py", line 552, in _exit_wrapper
return await cm_exit(cm, exc_type, exc, tb)
File "/usr/local/lib/python3.7/site-packages/async_generator/_util.py", line 53, in __aexit__
await self._agen.athrow(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 411, in background_trio_service
await manager.stop()
File "/usr/local/lib/python3.7/site-packages/trio/_core/_run.py", line 730, in __aexit__
raise combined_error_from_nursery
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 208, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 76, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/service/trio.py", line 10, in continuously_report
self._reporter.report_now()
File "/usr/local/lib/python3.7/site-packages/pyformance/reporters/influx.py", line 88, in report_now
response = urlopen(request)
File "/usr/local/lib/python3.7/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.7/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/local/lib/python3.7/urllib/request.py", line 543, in _open
'_open', req)
File "/usr/local/lib/python3.7/urllib/request.py", line 503, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.7/urllib/request.py", line 1345, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/local/lib/python3.7/urllib/request.py", line 1320, in do_open
r = h.getresponse()
File "/usr/local/lib/python3.7/http/client.py", line 1336, in getresponse
response.begin()
File "/usr/local/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/lib/python3.7/http/client.py", line 275, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
|
combined_error
|
async def continuously_report(self) -> None:
async for _ in trio_utils.every(self._reporting_frequency):
super().report_now()
|
async def continuously_report(self) -> None:
async for _ in trio_utils.every(self._reporting_frequency):
self._reporter.report_now()
|
https://github.com/ethereum/trinity/issues/1903
|
Traceback (most recent call last):
File "/usr/src/app/trinity/trinity/extensibility/trio.py", line 50, in run_process
await self.do_run(event_bus)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/component.py", line 171, in do_run
await managers[0].wait_finished()
File "/usr/local/lib/python3.7/contextlib.py", line 678, in __aexit__
raise exc_details[1]
File "/usr/local/lib/python3.7/contextlib.py", line 661, in __aexit__
cb_suppress = await cb(*exc_details)
File "/usr/local/lib/python3.7/contextlib.py", line 552, in _exit_wrapper
return await cm_exit(cm, exc_type, exc, tb)
File "/usr/local/lib/python3.7/site-packages/async_generator/_util.py", line 53, in __aexit__
await self._agen.athrow(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 411, in background_trio_service
await manager.stop()
File "/usr/local/lib/python3.7/site-packages/trio/_core/_run.py", line 730, in __aexit__
raise combined_error_from_nursery
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 208, in run
for _, exc_value, exc_tb in self._errors
File "/usr/local/lib/python3.7/site-packages/async_service/base.py", line 300, in _run_and_manage_task
await task.run()
File "/usr/local/lib/python3.7/site-packages/async_service/trio.py", line 76, in run
await self._async_fn(*self._async_fn_args)
File "/usr/src/app/trinity/trinity/components/builtin/metrics/service/trio.py", line 10, in continuously_report
self._reporter.report_now()
File "/usr/local/lib/python3.7/site-packages/pyformance/reporters/influx.py", line 88, in report_now
response = urlopen(request)
File "/usr/local/lib/python3.7/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.7/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/local/lib/python3.7/urllib/request.py", line 543, in _open
'_open', req)
File "/usr/local/lib/python3.7/urllib/request.py", line 503, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.7/urllib/request.py", line 1345, in http_open
return self.do_open(http.client.HTTPConnection, req)
File "/usr/local/lib/python3.7/urllib/request.py", line 1320, in do_open
r = h.getresponse()
File "/usr/local/lib/python3.7/http/client.py", line 1336, in getresponse
response.begin()
File "/usr/local/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/lib/python3.7/http/client.py", line 275, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
|
combined_error
|
def __init__(
self,
transport: TransportAPI,
base_protocol: BaseP2PProtocol,
protocols: Sequence[ProtocolAPI],
token: CancelToken = None,
max_queue_size: int = 4096,
) -> None:
if token is None:
loop = None
else:
loop = token.loop
base_token = CancelToken(f"multiplexer[{transport.remote}]", loop=loop)
if token is None:
self.cancel_token = base_token
else:
self.cancel_token = base_token.chain(token)
self._transport = transport
# the base `p2p` protocol instance.
self._base_protocol = base_protocol
# the sub-protocol instances
self._protocols = protocols
# Lock to ensure that multiple call sites cannot concurrently stream
# messages.
self._multiplex_lock = asyncio.Lock()
# Lock management on a per-protocol basis to ensure we only have one
# stream consumer for each protocol.
self._protocol_locks = {
type(protocol): asyncio.Lock() for protocol in self.get_protocols()
}
# Each protocol gets a queue where messages for the individual protocol
# are placed when streamed from the transport
self._protocol_queues = {
type(protocol): asyncio.Queue(max_queue_size)
for protocol in self.get_protocols()
}
self._msg_counts = collections.defaultdict(int)
|
def __init__(
self,
transport: TransportAPI,
base_protocol: BaseP2PProtocol,
protocols: Sequence[ProtocolAPI],
token: CancelToken = None,
max_queue_size: int = 4096,
) -> None:
if token is None:
loop = None
else:
loop = token.loop
base_token = CancelToken(f"multiplexer[{transport.remote}]", loop=loop)
if token is None:
self.cancel_token = base_token
else:
self.cancel_token = base_token.chain(token)
self._transport = transport
# the base `p2p` protocol instance.
self._base_protocol = base_protocol
# the sub-protocol instances
self._protocols = protocols
# Lock to ensure that multiple call sites cannot concurrently stream
# messages.
self._multiplex_lock = asyncio.Lock()
# Lock management on a per-protocol basis to ensure we only have one
# stream consumer for each protocol.
self._protocol_locks = ResourceLock()
# Each protocol gets a queue where messages for the individual protocol
# are placed when streamed from the transport
self._protocol_queues = {
type(protocol): asyncio.Queue(max_queue_size)
for protocol in self.get_protocols()
}
self._msg_counts = collections.defaultdict(int)
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
def stream_protocol_messages(
self,
protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]],
) -> AsyncIterator[CommandAPI[Any]]:
"""
Stream the messages for the specified protocol.
"""
if isinstance(protocol_identifier, ProtocolAPI):
protocol_class = type(protocol_identifier)
elif isinstance(protocol_identifier, type) and issubclass(
protocol_identifier, ProtocolAPI
):
protocol_class = protocol_identifier
else:
raise TypeError("Unknown protocol identifier: {protocol}")
if not self.has_protocol(protocol_class):
raise UnknownProtocol(f"Unknown protocol '{protocol_class}'")
if self._protocol_locks[protocol_class].locked():
raise Exception(f"Streaming lock for {protocol_class} is not free.")
elif not self._multiplex_lock.locked():
raise Exception("Not multiplexed.")
# Mostly a sanity check but this ensures we do better than accidentally
# raising an attribute error in whatever race conditions or edge cases
# potentially make the `_multiplex_token` unavailable.
if not hasattr(self, "_multiplex_token"):
raise Exception("No cancel token found for multiplexing.")
# We do the wait_iter here so that the call sites in the handshakers
# that use this don't need to be aware of cancellation tokens.
return self.wait_iter(
self._stream_protocol_messages(protocol_class),
token=self._multiplex_token,
)
|
def stream_protocol_messages(
self,
protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]],
) -> AsyncIterator[CommandAPI[Any]]:
"""
Stream the messages for the specified protocol.
"""
if isinstance(protocol_identifier, ProtocolAPI):
protocol_class = type(protocol_identifier)
elif isinstance(protocol_identifier, type) and issubclass(
protocol_identifier, ProtocolAPI
):
protocol_class = protocol_identifier
else:
raise TypeError("Unknown protocol identifier: {protocol}")
if not self.has_protocol(protocol_class):
raise UnknownProtocol(f"Unknown protocol '{protocol_class}'")
if self._protocol_locks.is_locked(protocol_class):
raise Exception(f"Streaming lock for {protocol_class} is not free.")
elif not self._multiplex_lock.locked():
raise Exception("Not multiplexed.")
# Mostly a sanity check but this ensures we do better than accidentally
# raising an attribute error in whatever race conditions or edge cases
# potentially make the `_multiplex_token` unavailable.
if not hasattr(self, "_multiplex_token"):
raise Exception("No cancel token found for multiplexing.")
# We do the wait_iter here so that the call sites in the handshakers
# that use this don't need to be aware of cancellation tokens.
return self.wait_iter(
self._stream_protocol_messages(protocol_class),
token=self._multiplex_token,
)
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
async def _stream_protocol_messages(
self,
protocol_class: Type[ProtocolAPI],
) -> AsyncIterator[CommandAPI[Any]]:
"""
Stream the messages for the specified protocol.
"""
async with self._protocol_locks[protocol_class]:
msg_queue = self._protocol_queues[protocol_class]
if not hasattr(self, "_multiplex_token"):
raise Exception("Multiplexer is not multiplexed")
token = self._multiplex_token
while not self.is_closing and not token.triggered:
try:
# We use an optimistic strategy here of using
# `get_nowait()` to reduce the number of times we yield to
# the event loop. Since this is an async generator it will
# yield to the loop each time it returns a value so we
# don't have to worry about this blocking other processes.
yield msg_queue.get_nowait()
except asyncio.QueueEmpty:
yield await msg_queue.get()
|
async def _stream_protocol_messages(
self,
protocol_class: Type[ProtocolAPI],
) -> AsyncIterator[CommandAPI[Any]]:
"""
Stream the messages for the specified protocol.
"""
async with self._protocol_locks.lock(protocol_class):
msg_queue = self._protocol_queues[protocol_class]
if not hasattr(self, "_multiplex_token"):
raise Exception("Multiplexer is not multiplexed")
token = self._multiplex_token
while not self.is_closing and not token.triggered:
try:
# We use an optimistic strategy here of using
# `get_nowait()` to reduce the number of times we yield to
# the event loop. Since this is an async generator it will
# yield to the loop each time it returns a value so we
# don't have to worry about this blocking other processes.
yield msg_queue.get_nowait()
except asyncio.QueueEmpty:
yield await self.wait(msg_queue.get(), token=token)
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
def lock_node_for_handshake(self, node: NodeAPI) -> AsyncContextManager[None]:
return self._handshake_locks.lock(node)
|
def lock_node_for_handshake(self, node: NodeAPI) -> asyncio.Lock:
return self._handshake_locks.lock(node)
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
def __init__(self) -> None:
self._locks = {}
self._reference_counts = defaultdict(int)
|
def __init__(self) -> None:
self._locks = weakref.WeakKeyDictionary()
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
async def lock(self, resource: TResource) -> AsyncIterator[None]:
if resource not in self._locks:
self._locks[resource] = asyncio.Lock()
try:
self._reference_counts[resource] += 1
async with self._locks[resource]:
yield
finally:
self._reference_counts[resource] -= 1
if self._reference_counts[resource] <= 0:
del self._reference_counts[resource]
del self._locks[resource]
|
def lock(self, resource: Hashable) -> asyncio.Lock:
if resource not in self._locks:
self._locks[resource] = asyncio.Lock()
lock = self._locks[resource]
return lock
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
def is_locked(self, resource: TResource) -> bool:
if resource not in self._locks:
return False
else:
return self._locks[resource].locked()
|
def is_locked(self, resource: Hashable) -> bool:
if resource not in self._locks:
return False
else:
return self._locks[resource].locked()
|
https://github.com/ethereum/trinity/issues/1405
|
DEBUG 12-18 15:21:47 async_process_runner.py b'\x1b[1m\x1b[31m ERROR 2019-12-18 15:21:47,268 LESPeerPool unexpected error during peer connection\n'
DEBUG 12-18 15:21:47 async_process_runner.py b'Traceback (most recent call last):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 209, in maybe_connect_more_peers\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' for backend in self.peer_backends\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 191, in _add_peers_from_backend\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' await self.connect_to_nodes(iter(candidates))\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 419, in connect_to_nodes\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' loop=self.get_event_loop(),\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 440, in connect_to_node\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' async with self.lock_node_for_handshake(node):\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/peer_pool.py", line 423, in lock_node_for_handshake\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self._handshake_locks.lock(node)\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/p2p/resource_lock.py", line 18, in lock\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' lock = self._locks[resource]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' File "/home/circleci/repo/.tox/py36-wheel-cli/lib/python3.6/weakref.py", line 394, in __getitem__\n'
DEBUG 12-18 15:21:47 async_process_runner.py b' return self.data[ref(key)]\n'
DEBUG 12-18 15:21:47 async_process_runner.py b"KeyError: <weakref at 0x7f48b9d9cf48; to 'Node' at 0x7f48b83175c0>\x1b[0m\n"
|
KeyError
|
async def schedule_segment(
self, parent_header: BlockHeader, gap_length: int, skeleton_peer: TChainPeer
) -> None:
"""
:param parent_header: the parent of the gap to fill
:param gap_length: how long is the header gap
:param skeleton_peer: the peer that provided the parent_header - will not use to fill gaps
"""
try:
await self.wait(
self._filler_header_tasks.add(((parent_header, gap_length, skeleton_peer),))
)
except ValidationError as exc:
self.logger.debug(
"Tried to re-add a duplicate list of headers to the download queue: %s",
exc,
)
|
async def schedule_segment(
self, parent_header: BlockHeader, gap_length: int, skeleton_peer: TChainPeer
) -> None:
"""
:param parent_header: the parent of the gap to fill
:param gap_length: how long is the header gap
:param skeleton_peer: the peer that provided the parent_header - will not use to fill gaps
"""
await self.wait(
self._filler_header_tasks.add(((parent_header, gap_length, skeleton_peer),))
)
|
https://github.com/ethereum/trinity/issues/1083
|
ERROR 09-09 11:44:41 ETHHeaderChainSyncer Unexpected error in <trinity.protocol.eth.sync.ETHHeaderChainSyncer object at 0x7fc3dfd1beb8>, exiting
Traceback (most recent call last):
File "/home/ubuntu/trinity/p2p/service.py", line 118, in run
File "/home/ubuntu/trinity/trinity/sync/common/headers.py", line 866, in _run
File "/home/ubuntu/trinity/p2p/cancellable.py", line 20, in wait
return await self.wait_first(awaitable, token=token, timeout=timeout)
File "/home/ubuntu/trinity/p2p/cancellable.py", line 43, in wait_first
return await token_chain.cancellable_wait(*awaitables, timeout=timeout)
File "/home/ubuntu/trinity/venv/lib/python3.6/site-packages/cancel_token/token.py", line 178, in cancellable_wait
return done.pop().result()
File "/home/ubuntu/trinity/trinity/sync/common/headers.py", line 880, in _build_skeleton
File "/home/ubuntu/trinity/trinity/sync/common/headers.py", line 961, in _full_skeleton_sync
File "/home/ubuntu/trinity/p2p/cancellable.py", line 20, in wait
return await self.wait_first(awaitable, token=token, timeout=timeout)
File "/home/ubuntu/trinity/p2p/cancellable.py", line 43, in wait_first
return await token_chain.cancellable_wait(*awaitables, timeout=timeout)
File "/home/ubuntu/trinity/venv/lib/python3.6/site-packages/cancel_token/token.py", line 178, in cancellable_wait
return done.pop().result()
File "/home/ubuntu/trinity/trinity/sync/common/headers.py", line 596, in schedule_segment
File "/home/ubuntu/trinity/p2p/cancellable.py", line 20, in wait
return await self.wait_first(awaitable, token=token, timeout=timeout)
File "/home/ubuntu/trinity/p2p/cancellable.py", line 43, in wait_first
return await token_chain.cancellable_wait(*awaitables, timeout=timeout)
File "/home/ubuntu/trinity/venv/lib/python3.6/site-packages/cancel_token/token.py", line 178, in cancellable_wait
return done.pop().result()
File "/home/ubuntu/trinity/trinity/_utils/datastructures.py", line 160, in add
eth_utils.exceptions.ValidationError: Duplicate tasks detected: {(<eth.rlp.headers.BlockHeader object at 0x7fc39a1aee80>, 192, ETHPeer <Node(0xd64684e7a34c4f0aaaa07e1f6143ec91610b99b120b441387356def326890b40867a4aa4384af7fe4088858c161a05efb61499c35d194b4f46c4a0cac82f2ee2@35.193.221.102:40780)>)} are already present in the queue
|
eth_utils.exceptions.ValidationError
|
def _unpack_v4(
message: bytes,
) -> Tuple[datatypes.PublicKey, int, Tuple[Any, ...], Hash32]:
"""Unpack a discovery v4 UDP message received from a remote node.
Returns the public key used to sign the message, the cmd ID, payload and hash.
"""
message_hash = Hash32(message[:MAC_SIZE])
if message_hash != keccak(message[MAC_SIZE:]):
raise WrongMAC("Wrong msg mac")
signature = keys.Signature(message[MAC_SIZE:HEAD_SIZE])
signed_data = message[HEAD_SIZE:]
remote_pubkey = signature.recover_public_key_from_msg(signed_data)
cmd_id = message[HEAD_SIZE]
try:
cmd = CMD_ID_MAP[cmd_id]
except KeyError as e:
raise UnknownCommand(f"Invalid Command ID {cmd_id}") from e
payload = tuple(rlp.decode(message[HEAD_SIZE + 1 :], strict=False))
# Ignore excessive list elements as required by EIP-8.
payload = payload[: cmd.elem_count]
return remote_pubkey, cmd_id, payload, message_hash
|
def _unpack_v4(
message: bytes,
) -> Tuple[datatypes.PublicKey, int, Tuple[Any, ...], Hash32]:
"""Unpack a discovery v4 UDP message received from a remote node.
Returns the public key used to sign the message, the cmd ID, payload and hash.
"""
message_hash = Hash32(message[:MAC_SIZE])
if message_hash != keccak(message[MAC_SIZE:]):
raise WrongMAC("Wrong msg mac")
signature = keys.Signature(message[MAC_SIZE:HEAD_SIZE])
signed_data = message[HEAD_SIZE:]
remote_pubkey = signature.recover_public_key_from_msg(signed_data)
cmd_id = message[HEAD_SIZE]
cmd = CMD_ID_MAP[cmd_id]
payload = tuple(rlp.decode(message[HEAD_SIZE + 1 :], strict=False))
# Ignore excessive list elements as required by EIP-8.
payload = payload[: cmd.elem_count]
return remote_pubkey, cmd_id, payload, message_hash
|
https://github.com/ethereum/trinity/issues/1062
|
ERROR 09-05 09:51:49 asyncio Exception in callback UDPTransport._on_read_ready
handle: <Handle UDPTransport._on_read_ready>
Traceback (most recent call last):
File "uvloop/cbhandles.pyx", line 69, in uvloop.loop.Handle._run
File "uvloop/handles/udp.pyx", line 64, in uvloop.loop.UDPTransport._on_read_ready
File "/home/ubuntu/trinity/p2p/discovery.py", line 448, in datagram_received
self.receive(address, cast(bytes, data))
File "/home/ubuntu/trinity/p2p/discovery.py", line 463, in receive
remote_pubkey, cmd_id, payload, message_hash = _unpack_v4(message)
File "/home/ubuntu/trinity/p2p/discovery.py", line 1261, in _unpack_v4
cmd = CMD_ID_MAP[cmd_id]
KeyError: 9
|
KeyError
|
async def receive_handshake(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
def _cleanup_reader_and_writer() -> None:
if not reader.at_eof():
reader.feed_eof()
writer.close()
try:
await self._receive_handshake(reader, writer)
except COMMON_RECEIVE_HANDSHAKE_EXCEPTIONS as e:
self.logger.debug("Could not complete handshake: %s", e)
_cleanup_reader_and_writer()
except OperationCancelled:
pass
except Exception as e:
self.logger.exception("Unexpected error handling handshake")
_cleanup_reader_and_writer()
|
async def receive_handshake(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
expected_exceptions = (
TimeoutError,
PeerConnectionLost,
HandshakeFailure,
NoMatchingPeerCapabilities,
asyncio.IncompleteReadError,
)
def _cleanup_reader_and_writer() -> None:
if not reader.at_eof():
reader.feed_eof()
writer.close()
try:
await self._receive_handshake(reader, writer)
except expected_exceptions as e:
self.logger.debug("Could not complete handshake: %s", e)
_cleanup_reader_and_writer()
except OperationCancelled:
pass
except Exception as e:
self.logger.exception("Unexpected error handling handshake")
_cleanup_reader_and_writer()
|
https://github.com/ethereum/trinity/issues/1031
|
ERROR 09-02 13:55:52 FullServer Unexpected error handling handshake
Traceback (most recent call last):
File "/home/ubuntu/trinity/trinity/server.py", line 156, in receive_handshake
await self._receive_handshake(reader, writer)
File "/home/ubuntu/trinity/trinity/server.py", line 176, in _receive_handshake
token=self.cancel_token,
File "/home/ubuntu/trinity/p2p/peer.py", line 133, in receive_handshake
token=token,
File "/home/ubuntu/trinity/p2p/handshake.py", line 268, in negotiate_protocol_handshakes
"Found no matching capabilities between self and peer:\n"
p2p.exceptions.NoMatchingPeerCapabilities: Found no matching capabilities between self and peer:
- local : (('eth', 63),)
- remote: (('pip', 1),)
|
p2p.exceptions.NoMatchingPeerCapabilities
|
async def receive_handshake(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
expected_exceptions = (
TimeoutError,
PeerConnectionLost,
HandshakeFailure,
NoMatchingPeerCapabilities,
asyncio.IncompleteReadError,
)
def _cleanup_reader_and_writer() -> None:
if not reader.at_eof():
reader.feed_eof()
writer.close()
try:
await self._receive_handshake(reader, writer)
except expected_exceptions as e:
self.logger.debug("Could not complete handshake: %s", e)
_cleanup_reader_and_writer()
except OperationCancelled:
pass
except Exception as e:
self.logger.exception("Unexpected error handling handshake")
_cleanup_reader_and_writer()
|
async def receive_handshake(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
expected_exceptions = (
TimeoutError,
PeerConnectionLost,
HandshakeFailure,
asyncio.IncompleteReadError,
)
def _cleanup_reader_and_writer() -> None:
if not reader.at_eof():
reader.feed_eof()
writer.close()
try:
await self._receive_handshake(reader, writer)
except expected_exceptions as e:
self.logger.debug("Could not complete handshake: %s", e)
_cleanup_reader_and_writer()
except OperationCancelled:
pass
except Exception as e:
self.logger.exception("Unexpected error handling handshake")
_cleanup_reader_and_writer()
|
https://github.com/ethereum/trinity/issues/1031
|
ERROR 09-02 13:55:52 FullServer Unexpected error handling handshake
Traceback (most recent call last):
File "/home/ubuntu/trinity/trinity/server.py", line 156, in receive_handshake
await self._receive_handshake(reader, writer)
File "/home/ubuntu/trinity/trinity/server.py", line 176, in _receive_handshake
token=self.cancel_token,
File "/home/ubuntu/trinity/p2p/peer.py", line 133, in receive_handshake
token=token,
File "/home/ubuntu/trinity/p2p/handshake.py", line 268, in negotiate_protocol_handshakes
"Found no matching capabilities between self and peer:\n"
p2p.exceptions.NoMatchingPeerCapabilities: Found no matching capabilities between self and peer:
- local : (('eth', 63),)
- remote: (('pip', 1),)
|
p2p.exceptions.NoMatchingPeerCapabilities
|
async def do_p2p_handshake(self) -> None:
"""Perform the handshake for the P2P base protocol.
Raises HandshakeFailure if the handshake is not successful.
"""
self.base_protocol.send_handshake()
cmd, msg = await self.read_msg()
if isinstance(cmd, Disconnect):
msg = cast(Dict[str, Any], msg)
# Peers sometimes send a disconnect msg before they send the initial P2P handshake.
if msg["reason"] == DisconnectReason.too_many_peers.value:
raise TooManyPeersFailure(f"{self} disconnected from us before handshake")
raise HandshakeFailure(
f"{self} disconnected before completing sub-proto handshake: {msg['reason_name']}"
)
await self.process_p2p_handshake(cmd, msg)
|
async def do_p2p_handshake(self) -> None:
"""Perform the handshake for the P2P base protocol.
Raises HandshakeFailure if the handshake is not successful.
"""
self.base_protocol.send_handshake()
try:
cmd, msg = await self.read_msg()
except rlp.DecodingError:
raise HandshakeFailure("Got invalid rlp data during handshake")
except MalformedMessage as e:
raise HandshakeFailure("Got malformed message during handshake") from e
if isinstance(cmd, Disconnect):
msg = cast(Dict[str, Any], msg)
# Peers sometimes send a disconnect msg before they send the initial P2P handshake.
if msg["reason"] == DisconnectReason.too_many_peers.value:
raise TooManyPeersFailure(f"{self} disconnected from us before handshake")
raise HandshakeFailure(
f"{self} disconnected before completing sub-proto handshake: {msg['reason_name']}"
)
await self.process_p2p_handshake(cmd, msg)
|
https://github.com/ethereum/trinity/issues/292
|
b'\x1b[1m\x1b[31m ERROR 02-19 09:50:08 ETHPeerPool Unexpected error during auth/p2p handshake with <Node(0x865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303)>\n'
b'Traceback (most recent call last):\n'
b' File "/home/circleci/repo/p2p/peer_pool.py", line 255, in connect\n'
b' peer = await self.wait(handshake(remote, self.get_peer_factory()))\n'
b' File "/home/circleci/repo/p2p/cancellable.py", line 20, in wait\n'
b' return await self.wait_first(awaitable, token=token, timeout=timeout)\n'
b' File "/home/circleci/repo/p2p/cancellable.py", line 42, in wait_first\n'
b' return await token_chain.cancellable_wait(*awaitables, timeout=timeout)\n'
b' File "/home/circleci/repo/.tox/py36-long_run_integration/lib/python3.6/site-packages/cancel_token/token.py", line 152, in cancellable_wait\n'
b' return done.pop().result()\n'
b' File "/home/circleci/repo/p2p/peer.py", line 128, in handshake\n'
b' await peer.do_sub_proto_handshake()\n'
b' File "/home/circleci/repo/p2p/peer.py", line 321, in do_sub_proto_handshake\n'
b' cmd, msg = await self.read_msg()\n'
b' File "/home/circleci/repo/p2p/peer.py", line 460, in read_msg\n'
b' decoded_msg = cast(Dict[str, Any], cmd.decode(msg))\n'
b' File "/home/circleci/repo/p2p/p2p_proto.py", line 78, in decode\n'
b' raw_decoded = cast(Dict[str, int], super().decode(data))\n'
b' File "/home/circleci/repo/p2p/protocol.py", line 123, in decode\n'
b' encoded_payload = self.decompress_payload(compressed_payload)\n'
b' File "/home/circleci/repo/p2p/protocol.py", line 130, in decompress_payload\n'
b' return snappy.decompress(raw_payload)\n'
b' File "/home/circleci/repo/.tox/py36-long_run_integration/lib/python3.6/site-packages/snappy/snappy.py", line 92, in uncompress\n'
b' return _uncompress(data)\n'
b'snappy.CompressedLengthError: Can not calculate uncompressed length\x1b[0m\n'
|
snappy.CompressedLengthError
|
def decompress_payload(self, raw_payload: bytes) -> bytes:
# Do the Snappy Decompression only if Snappy Compression is supported by the protocol
if self.snappy_support:
try:
return snappy.decompress(raw_payload)
except Exception as err:
# log this just in case it's a library error of some kind on valid messages.
self.logger.debug(
"Snappy decompression error on payload: %s", raw_payload.hex()
)
raise MalformedMessage from err
else:
return raw_payload
|
def decompress_payload(self, raw_payload: bytes) -> bytes:
# Do the Snappy Decompression only if Snappy Compression is supported by the protocol
if self.snappy_support:
return snappy.decompress(raw_payload)
else:
return raw_payload
|
https://github.com/ethereum/trinity/issues/292
|
b'\x1b[1m\x1b[31m ERROR 02-19 09:50:08 ETHPeerPool Unexpected error during auth/p2p handshake with <Node(0x865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303)>\n'
b'Traceback (most recent call last):\n'
b' File "/home/circleci/repo/p2p/peer_pool.py", line 255, in connect\n'
b' peer = await self.wait(handshake(remote, self.get_peer_factory()))\n'
b' File "/home/circleci/repo/p2p/cancellable.py", line 20, in wait\n'
b' return await self.wait_first(awaitable, token=token, timeout=timeout)\n'
b' File "/home/circleci/repo/p2p/cancellable.py", line 42, in wait_first\n'
b' return await token_chain.cancellable_wait(*awaitables, timeout=timeout)\n'
b' File "/home/circleci/repo/.tox/py36-long_run_integration/lib/python3.6/site-packages/cancel_token/token.py", line 152, in cancellable_wait\n'
b' return done.pop().result()\n'
b' File "/home/circleci/repo/p2p/peer.py", line 128, in handshake\n'
b' await peer.do_sub_proto_handshake()\n'
b' File "/home/circleci/repo/p2p/peer.py", line 321, in do_sub_proto_handshake\n'
b' cmd, msg = await self.read_msg()\n'
b' File "/home/circleci/repo/p2p/peer.py", line 460, in read_msg\n'
b' decoded_msg = cast(Dict[str, Any], cmd.decode(msg))\n'
b' File "/home/circleci/repo/p2p/p2p_proto.py", line 78, in decode\n'
b' raw_decoded = cast(Dict[str, int], super().decode(data))\n'
b' File "/home/circleci/repo/p2p/protocol.py", line 123, in decode\n'
b' encoded_payload = self.decompress_payload(compressed_payload)\n'
b' File "/home/circleci/repo/p2p/protocol.py", line 130, in decompress_payload\n'
b' return snappy.decompress(raw_payload)\n'
b' File "/home/circleci/repo/.tox/py36-long_run_integration/lib/python3.6/site-packages/snappy/snappy.py", line 92, in uncompress\n'
b' return _uncompress(data)\n'
b'snappy.CompressedLengthError: Can not calculate uncompressed length\x1b[0m\n'
|
snappy.CompressedLengthError
|
def _parse_parameter_file(self):
# hardcoded for now
# These should be explicitly obtained from the file, but for now that
# will wait until a reorganization of the source tree and better
# generalization.
self.dimensionality = 3
self.refine_by = 2
self.parameters["HydroMethod"] = "ramses"
self.parameters["Time"] = 1.0 # default unit is 1...
# We now execute the same logic Oliver's code does
rheader = {}
def read_rhs(f, cast):
line = f.readline().replace("\n", "")
p, v = line.split("=")
rheader[p.strip()] = cast(v.strip())
with open(self.parameter_filename) as f:
for _ in range(6):
read_rhs(f, int)
f.readline()
for _ in range(11):
read_rhs(f, float)
f.readline()
read_rhs(f, str)
# This next line deserves some comment. We specify a min_level that
# corresponds to the minimum level in the RAMSES simulation. RAMSES is
# one-indexed, but it also does refer to the *oct* dimensions -- so
# this means that a levelmin of 1 would have *1* oct in it. So a
# levelmin of 2 would have 8 octs at the root mesh level.
self.min_level = rheader["levelmin"] - 1
# Now we read the hilbert indices
self.hilbert_indices = {}
if rheader["ordering type"] == "hilbert":
f.readline() # header
for _ in range(rheader["ncpu"]):
dom, mi, ma = f.readline().split()
self.hilbert_indices[int(dom)] = (float(mi), float(ma))
if rheader["ordering type"] != "hilbert" and self._bbox is not None:
raise NotImplementedError(
"The ordering %s is not compatible with the `bbox` argument."
% rheader["ordering type"]
)
self.parameters.update(rheader)
self.domain_left_edge = np.zeros(3, dtype="float64")
self.domain_dimensions = np.ones(3, dtype="int32") * 2 ** (self.min_level + 1)
self.domain_right_edge = np.ones(3, dtype="float64")
# This is likely not true, but it's not clear
# how to determine the boundary conditions
self.periodicity = (True, True, True)
if self.force_cosmological is not None:
is_cosmological = self.force_cosmological
else:
# These conditions seem to always be true for non-cosmological datasets
is_cosmological = not (
rheader["time"] >= 0 and rheader["H0"] == 1 and rheader["aexp"] == 1
)
if not is_cosmological:
self.cosmological_simulation = 0
self.current_redshift = 0
self.hubble_constant = 0
self.omega_matter = 0
self.omega_lambda = 0
else:
self.cosmological_simulation = 1
self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
self.omega_lambda = rheader["omega_l"]
self.omega_matter = rheader["omega_m"]
self.hubble_constant = rheader["H0"] / 100.0 # This is H100
force_max_level, convention = self._force_max_level
if convention == "yt":
force_max_level += self.min_level + 1
self.max_level = min(force_max_level, rheader["levelmax"]) - self.min_level - 1
if self.cosmological_simulation == 0:
self.current_time = self.parameters["time"]
else:
self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = friedman(
self.omega_matter,
self.omega_lambda,
1.0 - self.omega_matter - self.omega_lambda,
)
age = self.parameters["time"]
iage = 1 + int(10.0 * age / self.dtau)
iage = np.min([iage, self.n_frw // 2 + (iage - self.n_frw // 2) // 10])
try:
self.time_simu = self.t_frw[iage] * (age - self.tau_frw[iage - 1]) / (
self.tau_frw[iage] - self.tau_frw[iage - 1]
) + self.t_frw[iage - 1] * (age - self.tau_frw[iage]) / (
self.tau_frw[iage - 1] - self.tau_frw[iage]
)
self.current_time = (
(self.time_tot + self.time_simu)
/ (self.hubble_constant * 1e7 / 3.08e24)
/ self.parameters["unit_t"]
)
except IndexError:
mylog.warning(
"Yt could not convert conformal time to physical time. "
"Yt will assume the simulation is *not* cosmological."
)
self.cosmological_simulation = 0
self.current_time = self.parameters["time"]
if self.num_groups > 0:
self.group_size = rheader["ncpu"] // self.num_groups
# Read namelist.txt file (if any)
self.read_namelist()
|
def _parse_parameter_file(self):
# hardcoded for now
# These should be explicitly obtained from the file, but for now that
# will wait until a reorganization of the source tree and better
# generalization.
self.dimensionality = 3
self.refine_by = 2
self.parameters["HydroMethod"] = "ramses"
self.parameters["Time"] = 1.0 # default unit is 1...
# We now execute the same logic Oliver's code does
rheader = {}
def read_rhs(f, cast):
line = f.readline().replace("\n", "")
p, v = line.split("=")
rheader[p.strip()] = cast(v.strip())
with open(self.parameter_filename) as f:
for _ in range(6):
read_rhs(f, int)
f.readline()
for _ in range(11):
read_rhs(f, float)
f.readline()
read_rhs(f, str)
# This next line deserves some comment. We specify a min_level that
# corresponds to the minimum level in the RAMSES simulation. RAMSES is
# one-indexed, but it also does refer to the *oct* dimensions -- so
# this means that a levelmin of 1 would have *1* oct in it. So a
# levelmin of 2 would have 8 octs at the root mesh level.
self.min_level = rheader["levelmin"] - 1
# Now we read the hilbert indices
self.hilbert_indices = {}
if rheader["ordering type"] == "hilbert":
f.readline() # header
for _ in range(rheader["ncpu"]):
dom, mi, ma = f.readline().split()
self.hilbert_indices[int(dom)] = (float(mi), float(ma))
if rheader["ordering type"] != "hilbert" and self._bbox is not None:
raise NotImplementedError(
"The ordering %s is not compatible with the `bbox` argument."
% rheader["ordering type"]
)
self.parameters.update(rheader)
self.domain_left_edge = np.zeros(3, dtype="float64")
self.domain_dimensions = np.ones(3, dtype="int32") * 2 ** (self.min_level + 1)
self.domain_right_edge = np.ones(3, dtype="float64")
# This is likely not true, but it's not clear
# how to determine the boundary conditions
self.periodicity = (True, True, True)
if self.force_cosmological is not None:
is_cosmological = self.force_cosmological
else:
# These conditions seem to always be true for non-cosmological datasets
is_cosmological = not (
rheader["time"] >= 0 and rheader["H0"] == 1 and rheader["aexp"] == 1
)
if not is_cosmological:
self.cosmological_simulation = 0
self.current_redshift = 0
self.hubble_constant = 0
self.omega_matter = 0
self.omega_lambda = 0
else:
self.cosmological_simulation = 1
self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
self.omega_lambda = rheader["omega_l"]
self.omega_matter = rheader["omega_m"]
self.hubble_constant = rheader["H0"] / 100.0 # This is H100
force_max_level, convention = self._force_max_level
if convention == "yt":
force_max_level += self.min_level + 1
self.max_level = min(force_max_level, rheader["levelmax"]) - self.min_level - 1
if self.cosmological_simulation == 0:
self.current_time = self.parameters["time"]
else:
self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = friedman(
self.omega_matter,
self.omega_lambda,
1.0 - self.omega_matter - self.omega_lambda,
)
age = self.parameters["time"]
iage = 1 + int(10.0 * age / self.dtau)
iage = np.min([iage, self.n_frw // 2 + (iage - self.n_frw // 2) // 10])
self.time_simu = self.t_frw[iage] * (age - self.tau_frw[iage - 1]) / (
self.tau_frw[iage] - self.tau_frw[iage - 1]
) + self.t_frw[iage - 1] * (age - self.tau_frw[iage]) / (
self.tau_frw[iage - 1] - self.tau_frw[iage]
)
self.current_time = (
(self.time_tot + self.time_simu)
/ (self.hubble_constant * 1e7 / 3.08e24)
/ self.parameters["unit_t"]
)
if self.num_groups > 0:
self.group_size = rheader["ncpu"] // self.num_groups
# Read namelist.txt file (if any)
self.read_namelist()
|
https://github.com/yt-project/yt/issues/2959
|
IndexError Traceback (most recent call last)
<ipython-input-7-49e2114551c4> in <module>
----> 1 ds = yt.load('output_00131/info_00131.txt')
~/.local/lib/python3.6/site-packages/yt/convenience.py in load(*args, **kwargs)
84 candidates = find_lowest_subclasses(candidates)
85 if len(candidates) == 1:
---> 86 return candidates[0](*args, **kwargs)
87 if len(candidates) == 0:
88 if ytcfg.get("yt", "enzo_db") != '' \
~/.local/lib/python3.6/site-packages/yt/frontends/ramses/data_structures.py in __init__(self, filename, dataset_type, fields, storage_filename, units_override, unit_system, extra_particle_fields, cosmological, bbox)
410
411 Dataset.__init__(self, filename, dataset_type, units_override=units_override,
--> 412 unit_system=unit_system)
413
414 # Add the particle types
~/.local/lib/python3.6/site-packages/yt/data_objects/static_output.py in __init__(self, filename, dataset_type, file_style, units_override, unit_system)
247 self._create_unit_registry()
248
--> 249 self._parse_parameter_file()
250 self.set_units()
251 self._assign_unit_system(unit_system)
~/.local/lib/python3.6/site-packages/yt/frontends/ramses/data_structures.py in _parse_parameter_file(self)
575 iage = np.min([iage,self.n_frw//2 + (iage - self.n_frw//2)//10])
576
--> 577 self.time_simu = self.t_frw[iage ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
578 self.t_frw[iage-1]*(age-self.tau_frw[iage ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
579
IndexError: index 2686 is out of bounds for axis 0 with size 1001
|
IndexError
|
def _set_center(self, center):
if center is None:
self.center = None
return
elif isinstance(center, YTArray):
self.center = self.ds.arr(center.astype("float64"))
self.center.convert_to_units("code_length")
elif isinstance(center, (list, tuple, np.ndarray)):
if isinstance(center[0], YTQuantity):
self.center = self.ds.arr([c.copy() for c in center], dtype="float64")
self.center.convert_to_units("code_length")
else:
self.center = self.ds.arr(center, "code_length", dtype="float64")
elif isinstance(center, str):
if center.lower() in ("c", "center"):
self.center = self.ds.domain_center
# is this dangerous for race conditions?
elif center.lower() in ("max", "m"):
self.center = self.ds.find_max(("gas", "density"))[1]
elif center.startswith("max_"):
self.center = self.ds.find_max(center[4:])[1]
elif center.lower() == "min":
self.center = self.ds.find_min(("gas", "density"))[1]
elif center.startswith("min_"):
self.center = self.ds.find_min(center[4:])[1]
else:
self.center = self.ds.arr(center, "code_length", dtype="float64")
if self.center.ndim > 1:
mylog.debug("Removing singleton dimensions from 'center'.")
self.center = np.squeeze(self.center)
if self.center.ndim > 1:
msg = (
"center array must be 1 dimensional, supplied center has "
f"{self.center.ndim} dimensions with shape {self.center.shape}."
)
raise YTException(msg)
self.set_field_parameter("center", self.center)
|
def _set_center(self, center):
if center is None:
self.center = None
return
elif isinstance(center, YTArray):
self.center = self.ds.arr(center.astype("float64"))
self.center.convert_to_units("code_length")
elif isinstance(center, (list, tuple, np.ndarray)):
if isinstance(center[0], YTQuantity):
self.center = self.ds.arr([c.copy() for c in center], dtype="float64")
self.center.convert_to_units("code_length")
else:
self.center = self.ds.arr(center, "code_length", dtype="float64")
elif isinstance(center, str):
if center.lower() in ("c", "center"):
self.center = self.ds.domain_center
# is this dangerous for race conditions?
elif center.lower() in ("max", "m"):
self.center = self.ds.find_max(("gas", "density"))[1]
elif center.startswith("max_"):
self.center = self.ds.find_max(center[4:])[1]
elif center.lower() == "min":
self.center = self.ds.find_min(("gas", "density"))[1]
elif center.startswith("min_"):
self.center = self.ds.find_min(center[4:])[1]
else:
self.center = self.ds.arr(center, "code_length", dtype="float64")
self.set_field_parameter("center", self.center)
|
https://github.com/yt-project/yt/issues/2393
|
Traceback (most recent call last):
File "/tmp/fail.py", line 13, in <module>
sp['density']
File "/home/ccc/Documents/prog/yt/yt/data_objects/data_containers.py", line 256, in __getitem__
self.get_data(f)
File "/home/ccc/Documents/prog/yt/yt/data_objects/data_containers.py", line 1482, in get_data
self.index._identify_base_chunk(self)
File "/home/ccc/Documents/prog/yt/yt/frontends/ramses/data_structures.py", line 277, in _identify_base_chunk
domains = [dom for dom in self.domains if
File "/home/ccc/Documents/prog/yt/yt/frontends/ramses/data_structures.py", line 278, in <listcomp>
dom.included(dobj.selector)]
File "/home/ccc/Documents/prog/yt/yt/data_objects/data_containers.py", line 1434, in selector
self._selector = sclass(self)
File "yt/geometry/selection_routines.pyx", line 717, in yt.geometry.selection_routines.SphereSelector.__init__
self.center[i] = _ensure_code(dobj.center[i])
TypeError: only size-1 arrays can be converted to Python scalars
|
TypeError
|
def _fill_fields(self, fields):
fields = [f for f in fields if f not in self.field_data]
if len(fields) == 0:
return
ls = self._initialize_level_state(fields)
min_level = self._compute_minimum_level()
# NOTE: This usage of "refine_by" is actually *okay*, because it's
# being used with respect to iref, which is *already* scaled!
refine_by = self.ds.refine_by
if not iterable(self.ds.refine_by):
refine_by = [refine_by, refine_by, refine_by]
refine_by = np.array(refine_by, dtype="i8")
runtime_errors_count = 0
for level in range(self.level + 1):
if level < min_level:
self._update_level_state(ls)
continue
nd = self.ds.dimensionality
refinement = np.zeros_like(ls.base_dx)
refinement += self.ds.relative_refinement(0, ls.current_level)
refinement[nd:] = 1
domain_dims = self.ds.domain_dimensions * refinement
domain_dims = domain_dims.astype("int64")
tot = ls.current_dims.prod()
for chunk in ls.data_source.chunks(fields, "io"):
chunk[fields[0]]
input_fields = [chunk[field] for field in fields]
tot -= fill_region(
input_fields,
ls.fields,
ls.current_level,
ls.global_startindex,
chunk.icoords,
chunk.ires,
domain_dims,
refine_by,
)
if level == 0 and tot != 0:
runtime_errors_count += 1
self._update_level_state(ls)
if runtime_errors_count:
warnings.warn(
"Something went wrong during field computation. "
"This is likely due to missing ghost-zones support "
"in class %s",
self.ds.__class__,
category=RuntimeWarning,
)
mylog.debug(f"Caught {runtime_errors_count} runtime errors.")
for name, v in zip(fields, ls.fields):
if self.level > 0:
v = v[1:-1, 1:-1, 1:-1]
fi = self.ds._get_field_info(*name)
self[name] = self.ds.arr(v, fi.units)
|
def _fill_fields(self, fields):
fields = [f for f in fields if f not in self.field_data]
if len(fields) == 0:
return
ls = self._initialize_level_state(fields)
min_level = self._compute_minimum_level()
# NOTE: This usage of "refine_by" is actually *okay*, because it's
# being used with respect to iref, which is *already* scaled!
refine_by = self.ds.refine_by
if not iterable(self.ds.refine_by):
refine_by = [refine_by, refine_by, refine_by]
refine_by = np.array(refine_by, dtype="i8")
for level in range(self.level + 1):
if level < min_level:
self._update_level_state(ls)
continue
nd = self.ds.dimensionality
refinement = np.zeros_like(ls.base_dx)
refinement += self.ds.relative_refinement(0, ls.current_level)
refinement[nd:] = 1
domain_dims = self.ds.domain_dimensions * refinement
domain_dims = domain_dims.astype("int64")
tot = ls.current_dims.prod()
for chunk in ls.data_source.chunks(fields, "io"):
chunk[fields[0]]
input_fields = [chunk[field] for field in fields]
tot -= fill_region(
input_fields,
ls.fields,
ls.current_level,
ls.global_startindex,
chunk.icoords,
chunk.ires,
domain_dims,
refine_by,
)
if level == 0 and tot != 0:
raise RuntimeError
self._update_level_state(ls)
for name, v in zip(fields, ls.fields):
if self.level > 0:
v = v[1:-1, 1:-1, 1:-1]
fi = self.ds._get_field_info(*name)
self[name] = self.ds.arr(v, fi.units)
|
https://github.com/yt-project/yt/issues/2710
|
---------------------------------------------------------------------------
NeedsGridType Traceback (most recent call last)
~/codes/yt/yt/data_objects/data_containers.py in _generate_fluid_field(self, field)
308 try:
--> 309 finfo.check_available(gen_obj)
310 except NeedsGridType as ngt_exception:
~/codes/yt/yt/fields/derived_field.py in check_available(self, data)
210 for validator in self.validators:
--> 211 validator(data)
212 # If we don't get an exception, we're good to go
~/codes/yt/yt/fields/derived_field.py in __call__(self, data)
487 if not getattr(data, '_spatial', False):
--> 488 raise NeedsGridType(self.ghost_zones,self.fields)
489 if self.ghost_zones <= data._num_ghost_zones:
NeedsGridType: (1, [('gas', 'velocity_x'), ('gas', 'velocity_y'), ('gas', 'velocity_z')])
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-26-c39a34fa7c89> in <module>
3 ds=yt.load('/Volumes/TRANSCEND/Projects/amrvac/2020_prom_form/spex_dm_4_24_528_4224_3G/jk2020{0:04d}'.format(file_index)+'.dat', units_override=overrides)
4 #sub=ds.region(left_edge=[-11.9,0.1,0], right_edge=[11.9,24.9,0],center=)
----> 5 plot=yt.plot_2d(ds, "vorticity_magnitude")
6 plot.show()
~/codes/yt/yt/visualization/plot_window.py in plot_2d(ds, fields, center, width, axes_unit, origin, fontsize, field_parameters, window_size, aspect, data_source)
2174 field_parameters=field_parameters,
2175 window_size=window_size, aspect=aspect,
-> 2176 data_source=data_source)
~/codes/yt/yt/visualization/plot_window.py in __init__(self, ds, axis, fields, center, width, axes_unit, origin, right_handed, fontsize, field_parameters, window_size, aspect, data_source, buff_size)
1331 slc = ds.slice(axis, center[axis], field_parameters=field_parameters,
1332 center=center, data_source=data_source)
-> 1333 slc.get_data(fields)
1334 validate_mesh_fields(slc, fields)
1335 PWViewerMPL.__init__(self, slc, bounds, origin=origin,
~/codes/yt/yt/data_objects/data_containers.py in get_data(self, fields)
1617
1618 fields_to_generate += gen_fluids + gen_particles
-> 1619 self._generate_fields(fields_to_generate)
1620 for field in list(self.field_data.keys()):
1621 if field not in ofields:
~/codes/yt/yt/data_objects/data_containers.py in _generate_fields(self, fields_to_generate)
1637 fi = self.ds._get_field_info(*field)
1638 try:
-> 1639 fd = self._generate_field(field)
1640 if hasattr(fd, 'units'):
1641 fd.units.registry = self.ds.unit_registry
~/codes/yt/yt/data_objects/data_containers.py in _generate_field(self, field)
291 tr = self._generate_particle_field(field)
292 else:
--> 293 tr = self._generate_fluid_field(field)
294 if tr is None:
295 raise YTCouldNotGenerateField(field, self.ds)
~/codes/yt/yt/data_objects/data_containers.py in _generate_fluid_field(self, field)
309 finfo.check_available(gen_obj)
310 except NeedsGridType as ngt_exception:
--> 311 rv = self._generate_spatial_fluid(field, ngt_exception.ghost_zones)
312 else:
313 rv = finfo(gen_obj)
~/codes/yt/yt/data_objects/data_containers.py in _generate_spatial_fluid(self, field, ngz)
358 ind += wogz.select(
359 self.selector,
--> 360 gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz],
361 rv, ind)
362 if accumulate:
~/codes/yt/yt/data_objects/data_containers.py in __getitem__(self, key)
254 return self.field_data[f]
255 else:
--> 256 self.get_data(f)
257 # fi.units is the unit expression string. We depend on the registry
258 # hanging off the dataset to define this unit object.
~/codes/yt/yt/data_objects/construction_data_containers.py in get_data(self, fields)
727 self._fill_particles(part)
728
--> 729 if len(fill) > 0: self._fill_fields(fill)
730 for a, f in sorted(alias.items()):
731 if f.sampling_type == 'particle' and not is_sph_field:
~/codes/yt/yt/data_objects/construction_data_containers.py in _fill_fields(self, fields)
1186 if len(fields) == 0: return
1187 ls = self._initialize_level_state(fields)
-> 1188 min_level = self._compute_minimum_level()
1189 # NOTE: This usage of "refine_by" is actually *okay*, because it's
1190 # being used with respect to iref, which is *already* scaled!
~/codes/yt/yt/data_objects/construction_data_containers.py in _compute_minimum_level(self)
1172 ils.data_source.loose_selection = False
1173 min_level = self.level
-> 1174 for chunk in ils.data_source.chunks([], "io"):
1175 # With our odd selection methods, we can sometimes get no-sized ires.
1176 ir = chunk.ires
~/codes/yt/yt/data_objects/data_containers.py in chunks(self, fields, chunking_style, **kwargs)
1497 def chunks(self, fields, chunking_style, **kwargs):
1498 # This is an iterator that will yield the necessary chunks.
-> 1499 self.get_data() # Ensure we have built ourselves
1500 if fields is None: fields = []
1501 # chunk_ind can be supplied in the keyword arguments. If it's a
~/codes/yt/yt/data_objects/data_containers.py in get_data(self, fields)
1540 def get_data(self, fields=None):
1541 if self._current_chunk is None:
-> 1542 self.index._identify_base_chunk(self)
1543 if fields is None: return
1544 nfields = []
~/codes/yt/yt/geometry/grid_geometry_handler.py in _identify_base_chunk(self, dobj)
272 dobj._chunk_info[0] = weakref.proxy(dobj)
273 elif getattr(dobj, "_grids", None) is None:
--> 274 gi = dobj.selector.select_grids(self.grid_left_edge,
275 self.grid_right_edge,
276 self.grid_levels)
~/codes/yt/yt/data_objects/data_containers.py in selector(self)
1492 self, self._data_source.selector, sclass(self))
1493 else:
-> 1494 self._selector = sclass(self)
1495 return self._selector
1496
~/codes/yt/yt/geometry/selection_routines.pyx in yt.geometry.selection_routines.RegionSelector.__init__()
RuntimeError: Error: yt attempted to read outside the boundaries of a non-periodic domain along dimension 0.
Region left edge = -12.017045454545455 code_length, Region right edge = -11.84659090909091 code_length
Dataset left edge = -12.0 code_length, Dataset right edge = 12.0 code_length
This commonly happens when trying to compute ghost cells up to the domain boundary. Two possible solutions are to load a smaller region that does not border the edge or override the periodicity for this dataset.
|
RuntimeError
|
def __str__(self):
s = "s" if self.ghost_zones != 1 else ""
return f"fields {self.fields} require {self.ghost_zones} ghost zone{s}."
|
def __str__(self):
return f"({self.ghost_zones}, {self.fields})"
|
https://github.com/yt-project/yt/issues/2710
|
---------------------------------------------------------------------------
NeedsGridType Traceback (most recent call last)
~/codes/yt/yt/data_objects/data_containers.py in _generate_fluid_field(self, field)
308 try:
--> 309 finfo.check_available(gen_obj)
310 except NeedsGridType as ngt_exception:
~/codes/yt/yt/fields/derived_field.py in check_available(self, data)
210 for validator in self.validators:
--> 211 validator(data)
212 # If we don't get an exception, we're good to go
~/codes/yt/yt/fields/derived_field.py in __call__(self, data)
487 if not getattr(data, '_spatial', False):
--> 488 raise NeedsGridType(self.ghost_zones,self.fields)
489 if self.ghost_zones <= data._num_ghost_zones:
NeedsGridType: (1, [('gas', 'velocity_x'), ('gas', 'velocity_y'), ('gas', 'velocity_z')])
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-26-c39a34fa7c89> in <module>
3 ds=yt.load('/Volumes/TRANSCEND/Projects/amrvac/2020_prom_form/spex_dm_4_24_528_4224_3G/jk2020{0:04d}'.format(file_index)+'.dat', units_override=overrides)
4 #sub=ds.region(left_edge=[-11.9,0.1,0], right_edge=[11.9,24.9,0],center=)
----> 5 plot=yt.plot_2d(ds, "vorticity_magnitude")
6 plot.show()
~/codes/yt/yt/visualization/plot_window.py in plot_2d(ds, fields, center, width, axes_unit, origin, fontsize, field_parameters, window_size, aspect, data_source)
2174 field_parameters=field_parameters,
2175 window_size=window_size, aspect=aspect,
-> 2176 data_source=data_source)
~/codes/yt/yt/visualization/plot_window.py in __init__(self, ds, axis, fields, center, width, axes_unit, origin, right_handed, fontsize, field_parameters, window_size, aspect, data_source, buff_size)
1331 slc = ds.slice(axis, center[axis], field_parameters=field_parameters,
1332 center=center, data_source=data_source)
-> 1333 slc.get_data(fields)
1334 validate_mesh_fields(slc, fields)
1335 PWViewerMPL.__init__(self, slc, bounds, origin=origin,
~/codes/yt/yt/data_objects/data_containers.py in get_data(self, fields)
1617
1618 fields_to_generate += gen_fluids + gen_particles
-> 1619 self._generate_fields(fields_to_generate)
1620 for field in list(self.field_data.keys()):
1621 if field not in ofields:
~/codes/yt/yt/data_objects/data_containers.py in _generate_fields(self, fields_to_generate)
1637 fi = self.ds._get_field_info(*field)
1638 try:
-> 1639 fd = self._generate_field(field)
1640 if hasattr(fd, 'units'):
1641 fd.units.registry = self.ds.unit_registry
~/codes/yt/yt/data_objects/data_containers.py in _generate_field(self, field)
291 tr = self._generate_particle_field(field)
292 else:
--> 293 tr = self._generate_fluid_field(field)
294 if tr is None:
295 raise YTCouldNotGenerateField(field, self.ds)
~/codes/yt/yt/data_objects/data_containers.py in _generate_fluid_field(self, field)
309 finfo.check_available(gen_obj)
310 except NeedsGridType as ngt_exception:
--> 311 rv = self._generate_spatial_fluid(field, ngt_exception.ghost_zones)
312 else:
313 rv = finfo(gen_obj)
~/codes/yt/yt/data_objects/data_containers.py in _generate_spatial_fluid(self, field, ngz)
358 ind += wogz.select(
359 self.selector,
--> 360 gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz],
361 rv, ind)
362 if accumulate:
~/codes/yt/yt/data_objects/data_containers.py in __getitem__(self, key)
254 return self.field_data[f]
255 else:
--> 256 self.get_data(f)
257 # fi.units is the unit expression string. We depend on the registry
258 # hanging off the dataset to define this unit object.
~/codes/yt/yt/data_objects/construction_data_containers.py in get_data(self, fields)
727 self._fill_particles(part)
728
--> 729 if len(fill) > 0: self._fill_fields(fill)
730 for a, f in sorted(alias.items()):
731 if f.sampling_type == 'particle' and not is_sph_field:
~/codes/yt/yt/data_objects/construction_data_containers.py in _fill_fields(self, fields)
1186 if len(fields) == 0: return
1187 ls = self._initialize_level_state(fields)
-> 1188 min_level = self._compute_minimum_level()
1189 # NOTE: This usage of "refine_by" is actually *okay*, because it's
1190 # being used with respect to iref, which is *already* scaled!
~/codes/yt/yt/data_objects/construction_data_containers.py in _compute_minimum_level(self)
1172 ils.data_source.loose_selection = False
1173 min_level = self.level
-> 1174 for chunk in ils.data_source.chunks([], "io"):
1175 # With our odd selection methods, we can sometimes get no-sized ires.
1176 ir = chunk.ires
~/codes/yt/yt/data_objects/data_containers.py in chunks(self, fields, chunking_style, **kwargs)
1497 def chunks(self, fields, chunking_style, **kwargs):
1498 # This is an iterator that will yield the necessary chunks.
-> 1499 self.get_data() # Ensure we have built ourselves
1500 if fields is None: fields = []
1501 # chunk_ind can be supplied in the keyword arguments. If it's a
~/codes/yt/yt/data_objects/data_containers.py in get_data(self, fields)
1540 def get_data(self, fields=None):
1541 if self._current_chunk is None:
-> 1542 self.index._identify_base_chunk(self)
1543 if fields is None: return
1544 nfields = []
~/codes/yt/yt/geometry/grid_geometry_handler.py in _identify_base_chunk(self, dobj)
272 dobj._chunk_info[0] = weakref.proxy(dobj)
273 elif getattr(dobj, "_grids", None) is None:
--> 274 gi = dobj.selector.select_grids(self.grid_left_edge,
275 self.grid_right_edge,
276 self.grid_levels)
~/codes/yt/yt/data_objects/data_containers.py in selector(self)
1492 self, self._data_source.selector, sclass(self))
1493 else:
-> 1494 self._selector = sclass(self)
1495 return self._selector
1496
~/codes/yt/yt/geometry/selection_routines.pyx in yt.geometry.selection_routines.RegionSelector.__init__()
RuntimeError: Error: yt attempted to read outside the boundaries of a non-periodic domain along dimension 0.
Region left edge = -12.017045454545455 code_length, Region right edge = -11.84659090909091 code_length
Dataset left edge = -12.0 code_length, Dataset right edge = 12.0 code_length
This commonly happens when trying to compute ghost cells up to the domain boundary. Two possible solutions are to load a smaller region that does not border the edge or override the periodicity for this dataset.
|
RuntimeError
|
def _create_unit_registry(self, unit_system):
# yt assumes a CGS unit system by default (for back compat reasons).
# Since unyt is MKS by default we specify the MKS values of the base
# units in the CGS system. So, for length, 1 cm = .01 m. And so on.
self.unit_registry = UnitRegistry(unit_system=unit_system)
self.unit_registry.add("code_length", 0.01, dimensions.length)
self.unit_registry.add("code_mass", 0.001, dimensions.mass)
self.unit_registry.add("code_density", 1000.0, dimensions.density)
self.unit_registry.add(
"code_specific_energy", 1.0, dimensions.energy / dimensions.mass
)
self.unit_registry.add("code_time", 1.0, dimensions.time)
if unit_system == "mks":
self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
else:
self.unit_registry.add("code_magnetic", 0.1**0.5, dimensions.magnetic_field_cgs)
self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
self.unit_registry.add("code_pressure", 0.1, dimensions.pressure)
self.unit_registry.add("code_velocity", 0.01, dimensions.velocity)
self.unit_registry.add("code_metallicity", 1.0, dimensions.dimensionless)
self.unit_registry.add("h", 1.0, dimensions.dimensionless, r"h")
self.unit_registry.add("a", 1.0, dimensions.dimensionless)
|
def _create_unit_registry(self, unit_system):
from yt.units import dimensions as dimensions
# yt assumes a CGS unit system by default (for back compat reasons).
# Since unyt is MKS by default we specify the MKS values of the base
# units in the CGS system. So, for length, 1 cm = .01 m. And so on.
self.unit_registry = UnitRegistry(unit_system=unit_system)
self.unit_registry.add("code_length", 0.01, dimensions.length)
self.unit_registry.add("code_mass", 0.001, dimensions.mass)
self.unit_registry.add("code_density", 1000.0, dimensions.density)
self.unit_registry.add(
"code_specific_energy", 1.0, dimensions.energy / dimensions.mass
)
self.unit_registry.add("code_time", 1.0, dimensions.time)
if unit_system == "mks":
self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
else:
self.unit_registry.add("code_magnetic", 0.1**0.5, dimensions.magnetic_field_cgs)
self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
self.unit_registry.add("code_pressure", 0.1, dimensions.pressure)
self.unit_registry.add("code_velocity", 0.01, dimensions.velocity)
self.unit_registry.add("code_metallicity", 1.0, dimensions.dimensionless)
self.unit_registry.add("h", 1.0, dimensions.dimensionless, r"h")
self.unit_registry.add("a", 1.0, dimensions.dimensionless)
|
https://github.com/yt-project/yt/issues/2700
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-29-01d8197cb9a9> in <module>
6 return data["x"].base * unyt.msun
7 ds = yt.testing.fake_amr_ds(fields=["density"])
----> 8 ds.add_field(name="test", func=_test, sampling_type="cell", units="kg")
~/dev/python/yt-project/yt/yt/data_objects/static_output.py in add_field(self, name, function, sampling_type, **kwargs)
1271 self.field_info.add_field(name, sampling_type, function=function, **kwargs)
1272 self.field_info._show_field_errors.append(name)
-> 1273 deps, _ = self.field_info.check_derived_fields([name])
1274 self.field_dependencies.update(deps)
1275
~/dev/python/yt-project/yt/yt/fields/field_info_container.py in check_derived_fields(self, fields_to_check)
395 for field in fields_to_check:
396 if field not in self:
--> 397 raise YTFieldNotFound(str(field))
398 fi = self[field]
399 try:
TypeError: __init__() missing 1 required positional argument: 'ds'
|
TypeError
|
def set_units(self):
"""
Creates the unit registry for this dataset.
"""
if getattr(self, "cosmological_simulation", False):
# this dataset is cosmological, so add cosmological units.
self.unit_registry.modify("h", self.hubble_constant)
# Comoving lengths
for my_unit in ["m", "pc", "AU", "au"]:
new_unit = "%scm" % my_unit
my_u = Unit(my_unit, registry=self.unit_registry)
self.unit_registry.add(
new_unit,
my_u.base_value / (1 + self.current_redshift),
dimensions.length,
"\\rm{%s}/(1+z)" % my_unit,
prefixable=True,
)
self.unit_registry.modify("a", 1 / (1 + self.current_redshift))
self.set_code_units()
if getattr(self, "cosmological_simulation", False):
# this dataset is cosmological, add a cosmology object
# Set dynamical dark energy parameters
use_dark_factor = getattr(self, "use_dark_factor", False)
w_0 = getattr(self, "w_0", -1.0)
w_a = getattr(self, "w_a", 0.0)
# many frontends do not set this
setdefaultattr(self, "omega_radiation", 0.0)
self.cosmology = Cosmology(
hubble_constant=self.hubble_constant,
omega_matter=self.omega_matter,
omega_lambda=self.omega_lambda,
omega_radiation=self.omega_radiation,
use_dark_factor=use_dark_factor,
w_0=w_0,
w_a=w_a,
)
self.critical_density = self.cosmology.critical_density(self.current_redshift)
self.scale_factor = 1.0 / (1.0 + self.current_redshift)
|
def set_units(self):
"""
Creates the unit registry for this dataset.
"""
from yt.units.dimensions import length
if getattr(self, "cosmological_simulation", False):
# this dataset is cosmological, so add cosmological units.
self.unit_registry.modify("h", self.hubble_constant)
# Comoving lengths
for my_unit in ["m", "pc", "AU", "au"]:
new_unit = "%scm" % my_unit
my_u = Unit(my_unit, registry=self.unit_registry)
self.unit_registry.add(
new_unit,
my_u.base_value / (1 + self.current_redshift),
length,
"\\rm{%s}/(1+z)" % my_unit,
prefixable=True,
)
self.unit_registry.modify("a", 1 / (1 + self.current_redshift))
self.set_code_units()
if getattr(self, "cosmological_simulation", False):
# this dataset is cosmological, add a cosmology object
# Set dynamical dark energy parameters
use_dark_factor = getattr(self, "use_dark_factor", False)
w_0 = getattr(self, "w_0", -1.0)
w_a = getattr(self, "w_a", 0.0)
# many frontends do not set this
setdefaultattr(self, "omega_radiation", 0.0)
self.cosmology = Cosmology(
hubble_constant=self.hubble_constant,
omega_matter=self.omega_matter,
omega_lambda=self.omega_lambda,
omega_radiation=self.omega_radiation,
use_dark_factor=use_dark_factor,
w_0=w_0,
w_a=w_a,
)
self.critical_density = self.cosmology.critical_density(self.current_redshift)
self.scale_factor = 1.0 / (1.0 + self.current_redshift)
|
https://github.com/yt-project/yt/issues/2700
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-29-01d8197cb9a9> in <module>
6 return data["x"].base * unyt.msun
7 ds = yt.testing.fake_amr_ds(fields=["density"])
----> 8 ds.add_field(name="test", func=_test, sampling_type="cell", units="kg")
~/dev/python/yt-project/yt/yt/data_objects/static_output.py in add_field(self, name, function, sampling_type, **kwargs)
1271 self.field_info.add_field(name, sampling_type, function=function, **kwargs)
1272 self.field_info._show_field_errors.append(name)
-> 1273 deps, _ = self.field_info.check_derived_fields([name])
1274 self.field_dependencies.update(deps)
1275
~/dev/python/yt-project/yt/yt/fields/field_info_container.py in check_derived_fields(self, fields_to_check)
395 for field in fields_to_check:
396 if field not in self:
--> 397 raise YTFieldNotFound(str(field))
398 fi = self[field]
399 try:
TypeError: __init__() missing 1 required positional argument: 'ds'
|
TypeError
|
def add_field(self, name, function, sampling_type, **kwargs):
"""
Dataset-specific call to add_field
Add a new field, along with supplemental metadata, to the list of
available fields. This respects a number of arguments, all of which
are passed on to the constructor for
:class:`~yt.data_objects.api.DerivedField`.
Parameters
----------
name : str
is the name of the field.
function : callable
A function handle that defines the field. Should accept
arguments (field, data)
sampling_type: str
"cell" or "particle" or "local"
units : str
A plain text string encoding the unit. Powers must be in
python syntax (** instead of ^).
take_log : bool
Describes whether the field should be logged
validators : list
A list of :class:`FieldValidator` objects
vector_field : bool
Describes the dimensionality of the field. Currently unused.
display_name : str
A name used in the plots
force_override : bool
Whether to override an existing derived field. Does not work with
on-disk fields.
"""
self.index
override = kwargs.get("force_override", False)
if override and name in self.index.field_list:
raise RuntimeError(
"force_override is only meant to be used with "
"derived fields, not on-disk fields."
)
# Handle the case where the field has already been added.
if not override and name in self.field_info:
mylog.warning(
"Field %s already exists. To override use `force_override=True`.",
name,
)
self.field_info.add_field(name, function, sampling_type, **kwargs)
self.field_info._show_field_errors.append(name)
deps, _ = self.field_info.check_derived_fields([name])
self.field_dependencies.update(deps)
|
def add_field(self, name, function=None, sampling_type=None, **kwargs):
"""
Dataset-specific call to add_field
Add a new field, along with supplemental metadata, to the list of
available fields. This respects a number of arguments, all of which
are passed on to the constructor for
:class:`~yt.data_objects.api.DerivedField`.
Parameters
----------
name : str
is the name of the field.
function : callable
A function handle that defines the field. Should accept
arguments (field, data)
units : str
A plain text string encoding the unit. Powers must be in
python syntax (** instead of ^).
take_log : bool
Describes whether the field should be logged
validators : list
A list of :class:`FieldValidator` objects
particle_type : bool
Is this a particle (1D) field?
vector_field : bool
Describes the dimensionality of the field. Currently unused.
display_name : str
A name used in the plots
force_override : bool
Whether to override an existing derived field. Does not work with
on-disk fields.
"""
self.index
override = kwargs.get("force_override", False)
if override and name in self.index.field_list:
raise RuntimeError(
"force_override is only meant to be used with "
"derived fields, not on-disk fields."
)
# Handle the case where the field has already been added.
if not override and name in self.field_info:
mylog.error(
"Field %s already exists. To override use " + "force_override=True.",
name,
)
if kwargs.setdefault("particle_type", False):
if sampling_type is not None and sampling_type != "particle":
raise RuntimeError(
"Clashing definition of 'sampling_type' and "
"'particle_type'. Note that 'particle_type' is "
"deprecated. Please just use 'sampling_type'."
)
else:
sampling_type = "particle"
if sampling_type is None:
warnings.warn(
"Because 'sampling_type' not specified, yt will "
"assume a cell 'sampling_type'",
stacklevel=2,
)
sampling_type = "cell"
self.field_info.add_field(name, sampling_type, function=function, **kwargs)
self.field_info._show_field_errors.append(name)
deps, _ = self.field_info.check_derived_fields([name])
self.field_dependencies.update(deps)
|
https://github.com/yt-project/yt/issues/2700
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-29-01d8197cb9a9> in <module>
6 return data["x"].base * unyt.msun
7 ds = yt.testing.fake_amr_ds(fields=["density"])
----> 8 ds.add_field(name="test", func=_test, sampling_type="cell", units="kg")
~/dev/python/yt-project/yt/yt/data_objects/static_output.py in add_field(self, name, function, sampling_type, **kwargs)
1271 self.field_info.add_field(name, sampling_type, function=function, **kwargs)
1272 self.field_info._show_field_errors.append(name)
-> 1273 deps, _ = self.field_info.check_derived_fields([name])
1274 self.field_dependencies.update(deps)
1275
~/dev/python/yt-project/yt/yt/fields/field_info_container.py in check_derived_fields(self, fields_to_check)
395 for field in fields_to_check:
396 if field not in self:
--> 397 raise YTFieldNotFound(str(field))
398 fi = self[field]
399 try:
TypeError: __init__() missing 1 required positional argument: 'ds'
|
TypeError
|
def add_field(self, name, function, sampling_type, **kwargs):
"""
Add a new field, along with supplemental metadata, to the list of
available fields. This respects a number of arguments, all of which
are passed on to the constructor for
:class:`~yt.data_objects.api.DerivedField`.
Parameters
----------
name : str
is the name of the field.
function : callable
A function handle that defines the field. Should accept
arguments (field, data)
sampling_type: str
"cell" or "particle" or "local"
units : str
A plain text string encoding the unit. Powers must be in
python syntax (** instead of ^). If set to "auto" the units
will be inferred from the return value of the field function.
take_log : bool
Describes whether the field should be logged
validators : list
A list of :class:`FieldValidator` objects
vector_field : bool
Describes the dimensionality of the field. Currently unused.
display_name : str
A name used in the plots
"""
override = kwargs.pop("force_override", False)
# Handle the case where the field has already been added.
if not override and name in self:
# See below.
if function is None:
def create_function(f):
return f
return create_function
return
# add_field can be used in two different ways: it can be called
# directly, or used as a decorator (as yt.derived_field). If called directly,
# the function will be passed in as an argument, and we simply create
# the derived field and exit. If used as a decorator, function will
# be None. In that case, we return a function that will be applied
# to the function that the decorator is applied to.
kwargs.setdefault("ds", self.ds)
if function is None:
def create_function(f):
self[name] = DerivedField(name, sampling_type, f, **kwargs)
return f
return create_function
if isinstance(name, tuple):
self[name] = DerivedField(name, sampling_type, function, **kwargs)
return
sampling_type = self._sanitize_sampling_type(
sampling_type, particle_type=kwargs.get("particle_type")
)
if sampling_type == "particle":
ftype = "all"
else:
ftype = self.ds.default_fluid_type
if (ftype, name) not in self:
tuple_name = (ftype, name)
self[tuple_name] = DerivedField(tuple_name, sampling_type, function, **kwargs)
self.alias(name, tuple_name)
else:
self[name] = DerivedField(name, sampling_type, function, **kwargs)
|
def add_field(self, name, sampling_type, function=None, **kwargs):
"""
Add a new field, along with supplemental metadata, to the list of
available fields. This respects a number of arguments, all of which
are passed on to the constructor for
:class:`~yt.data_objects.api.DerivedField`.
Parameters
----------
name : str
is the name of the field.
function : callable
A function handle that defines the field. Should accept
arguments (field, data)
units : str
A plain text string encoding the unit. Powers must be in
python syntax (** instead of ^). If set to "auto" the units
will be inferred from the return value of the field function.
take_log : bool
Describes whether the field should be logged
validators : list
A list of :class:`FieldValidator` objects
particle_type : bool
Is this a particle (1D) field?
vector_field : bool
Describes the dimensionality of the field. Currently unused.
display_name : str
A name used in the plots
"""
override = kwargs.pop("force_override", False)
# Handle the case where the field has already been added.
if not override and name in self:
# See below.
if function is None:
def create_function(f):
return f
return create_function
return
# add_field can be used in two different ways: it can be called
# directly, or used as a decorator. If called directly, the
# function will be passed in as an argument, and we simply create
# the derived field and exit. If used as a decorator, function will
# be None. In that case, we return a function that will be applied
# to the function that the decorator is applied to.
kwargs.setdefault("ds", self.ds)
if function is None:
def create_function(f):
self[name] = DerivedField(name, sampling_type, f, **kwargs)
return f
return create_function
if isinstance(name, tuple):
self[name] = DerivedField(name, sampling_type, function, **kwargs)
return
particle_field = False
if sampling_type == "particle":
particle_field = True
if kwargs.get("particle_type", False):
warnings.warn(
"The particle_type keyword argument of add_field has been "
'deprecated. Please set sampling_type="particle" instead.',
stacklevel=2,
)
particle_field = True
if particle_field:
ftype = "all"
else:
ftype = self.ds.default_fluid_type
if (ftype, name) not in self:
tuple_name = (ftype, name)
self[tuple_name] = DerivedField(tuple_name, sampling_type, function, **kwargs)
self.alias(name, tuple_name)
else:
self[name] = DerivedField(name, sampling_type, function, **kwargs)
|
https://github.com/yt-project/yt/issues/2700
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-29-01d8197cb9a9> in <module>
6 return data["x"].base * unyt.msun
7 ds = yt.testing.fake_amr_ds(fields=["density"])
----> 8 ds.add_field(name="test", func=_test, sampling_type="cell", units="kg")
~/dev/python/yt-project/yt/yt/data_objects/static_output.py in add_field(self, name, function, sampling_type, **kwargs)
1271 self.field_info.add_field(name, sampling_type, function=function, **kwargs)
1272 self.field_info._show_field_errors.append(name)
-> 1273 deps, _ = self.field_info.check_derived_fields([name])
1274 self.field_dependencies.update(deps)
1275
~/dev/python/yt-project/yt/yt/fields/field_info_container.py in check_derived_fields(self, fields_to_check)
395 for field in fields_to_check:
396 if field not in self:
--> 397 raise YTFieldNotFound(str(field))
398 fi = self[field]
399 try:
TypeError: __init__() missing 1 required positional argument: 'ds'
|
TypeError
|
def add_field(self, name, function, sampling_type, **kwargs):
sampling_type = self._sanitize_sampling_type(
sampling_type, kwargs.get("particle_type")
)
if isinstance(name, str) or not iterable(name):
if sampling_type == "particle":
ftype = "all"
else:
ftype = "gas"
name = (ftype, name)
override = kwargs.get("force_override", False)
# Handle the case where the field has already been added.
if not override and name in self:
mylog.warning(
"Field %s already exists. To override use `force_override=True`.",
name,
)
return super(LocalFieldInfoContainer, self).add_field(
name, function, sampling_type, **kwargs
)
|
def add_field(self, name, function=None, sampling_type=None, **kwargs):
if not isinstance(name, tuple):
if kwargs.setdefault("particle_type", False):
name = ("all", name)
else:
name = ("gas", name)
override = kwargs.get("force_override", False)
# Handle the case where the field has already been added.
if not override and name in self:
mylog.error(
"Field %s already exists. To override use " + "force_override=True.",
name,
)
if kwargs.setdefault("particle_type", False):
if sampling_type is not None and sampling_type != "particle":
raise RuntimeError(
"Clashing definition of 'sampling_type' and "
"'particle_type'. Note that 'particle_type' is "
"deprecated. Please just use 'sampling_type'."
)
else:
sampling_type = "particle"
if sampling_type is None:
warnings.warn(
"Because 'sampling_type' is not specified, yt will "
"assume a 'cell' sampling_type for the %s field" % (name,),
stacklevel=3,
)
sampling_type = "cell"
return super(LocalFieldInfoContainer, self).add_field(
name, sampling_type, function, **kwargs
)
|
https://github.com/yt-project/yt/issues/2700
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-29-01d8197cb9a9> in <module>
6 return data["x"].base * unyt.msun
7 ds = yt.testing.fake_amr_ds(fields=["density"])
----> 8 ds.add_field(name="test", func=_test, sampling_type="cell", units="kg")
~/dev/python/yt-project/yt/yt/data_objects/static_output.py in add_field(self, name, function, sampling_type, **kwargs)
1271 self.field_info.add_field(name, sampling_type, function=function, **kwargs)
1272 self.field_info._show_field_errors.append(name)
-> 1273 deps, _ = self.field_info.check_derived_fields([name])
1274 self.field_dependencies.update(deps)
1275
~/dev/python/yt-project/yt/yt/fields/field_info_container.py in check_derived_fields(self, fields_to_check)
395 for field in fields_to_check:
396 if field not in self:
--> 397 raise YTFieldNotFound(str(field))
398 fi = self[field]
399 try:
TypeError: __init__() missing 1 required positional argument: 'ds'
|
TypeError
|
def __init__(
self,
filename,
dataset_type="gadget_binary",
additional_fields=(),
unit_base=None,
index_order=None,
index_filename=None,
kdtree_filename=None,
kernel_name=None,
bounding_box=None,
header_spec="default",
field_spec="default",
ptype_spec="default",
long_ids=False,
units_override=None,
mean_molecular_weight=None,
header_offset=0,
unit_system="cgs",
use_dark_factor=False,
w_0=-1.0,
w_a=0.0,
):
if self._instantiated:
return
# Check if filename is a directory
if os.path.isdir(filename):
# Get the .0 snapshot file. We know there's only 1 and it's valid since we
# came through _is_valid in load()
for f in os.listdir(filename):
fname = os.path.join(filename, f)
fext = os.path.splitext(fname)[-1]
if (
(".0" in f)
and (fext not in {".ewah", ".kdtree"})
and os.path.isfile(fname)
):
filename = os.path.join(filename, f)
break
self._header = GadgetBinaryHeader(filename, header_spec)
header_size = self._header.size
if header_size != [256]:
only_on_root(
mylog.warn,
"Non-standard header size is detected! "
"Gadget-2 standard header is 256 bytes, but yours is %s. "
"Make sure a non-standard header is actually expected. "
"Otherwise something is wrong, "
"and you might want to check how the dataset is loaded. "
"Futher information about header specification can be found in "
"https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
header_size,
)
self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs)
self._ptype_spec = self._setup_binary_spec(ptype_spec, gadget_ptype_specs)
self.storage_filename = None
if long_ids:
self._id_dtype = "u8"
else:
self._id_dtype = "u4"
self.long_ids = long_ids
self.header_offset = header_offset
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
unit_base["cmcm"] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
if bounding_box is not None:
# This ensures that we know a bounding box has been applied
self._domain_override = True
bbox = np.array(bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
self.domain_left_edge = bbox[:, 0]
self.domain_right_edge = bbox[:, 1]
else:
self.domain_left_edge = self.domain_right_edge = None
if units_override is not None:
raise RuntimeError(
"units_override is not supported for GadgetDataset. "
+ "Use unit_base instead."
)
# Set dark energy parameters before cosmology object is created
self.use_dark_factor = use_dark_factor
self.w_0 = w_0
self.w_a = w_a
super(GadgetDataset, self).__init__(
filename,
dataset_type=dataset_type,
unit_system=unit_system,
index_order=index_order,
index_filename=index_filename,
kdtree_filename=kdtree_filename,
kernel_name=kernel_name,
)
if self.cosmological_simulation:
self.time_unit.convert_to_units("s/h")
self.length_unit.convert_to_units("kpccm/h")
self.mass_unit.convert_to_units("g/h")
else:
self.time_unit.convert_to_units("s")
self.length_unit.convert_to_units("kpc")
self.mass_unit.convert_to_units("Msun")
if mean_molecular_weight is None:
self.mu = default_mu
else:
self.mu = mean_molecular_weight
|
def __init__(
self,
filename,
dataset_type="gadget_binary",
additional_fields=(),
unit_base=None,
index_order=None,
index_filename=None,
kdtree_filename=None,
kernel_name=None,
bounding_box=None,
header_spec="default",
field_spec="default",
ptype_spec="default",
long_ids=False,
units_override=None,
mean_molecular_weight=None,
header_offset=0,
unit_system="cgs",
use_dark_factor=False,
w_0=-1.0,
w_a=0.0,
):
if self._instantiated:
return
# Check if filename is a directory
if os.path.isdir(filename):
# Get the .0 snapshot file. We know there's only 1 and it's valid since we
# came through _is_valid in load()
for f in os.listdir(filename):
fname = os.path.join(filename, f)
if (".0" in f) and (".ewah" not in f) and os.path.isfile(fname):
filename = os.path.join(filename, f)
break
self._header = GadgetBinaryHeader(filename, header_spec)
header_size = self._header.size
if header_size != [256]:
only_on_root(
mylog.warn,
"Non-standard header size is detected! "
"Gadget-2 standard header is 256 bytes, but yours is %s. "
"Make sure a non-standard header is actually expected. "
"Otherwise something is wrong, "
"and you might want to check how the dataset is loaded. "
"Futher information about header specification can be found in "
"https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
header_size,
)
self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs)
self._ptype_spec = self._setup_binary_spec(ptype_spec, gadget_ptype_specs)
self.storage_filename = None
if long_ids:
self._id_dtype = "u8"
else:
self._id_dtype = "u4"
self.long_ids = long_ids
self.header_offset = header_offset
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
unit_base["cmcm"] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
if bounding_box is not None:
# This ensures that we know a bounding box has been applied
self._domain_override = True
bbox = np.array(bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
self.domain_left_edge = bbox[:, 0]
self.domain_right_edge = bbox[:, 1]
else:
self.domain_left_edge = self.domain_right_edge = None
if units_override is not None:
raise RuntimeError(
"units_override is not supported for GadgetDataset. "
+ "Use unit_base instead."
)
# Set dark energy parameters before cosmology object is created
self.use_dark_factor = use_dark_factor
self.w_0 = w_0
self.w_a = w_a
super(GadgetDataset, self).__init__(
filename,
dataset_type=dataset_type,
unit_system=unit_system,
index_order=index_order,
index_filename=index_filename,
kdtree_filename=kdtree_filename,
kernel_name=kernel_name,
)
if self.cosmological_simulation:
self.time_unit.convert_to_units("s/h")
self.length_unit.convert_to_units("kpccm/h")
self.mass_unit.convert_to_units("g/h")
else:
self.time_unit.convert_to_units("s")
self.length_unit.convert_to_units("kpc")
self.mass_unit.convert_to_units("Msun")
if mean_molecular_weight is None:
self.mu = default_mu
else:
self.mu = mean_molecular_weight
|
https://github.com/yt-project/yt/issues/2778
|
Traceback (most recent call last):
File "ala.py", line 2, in <module>
ds = OWLSDataset("/home/xarth/codes/yt_data/snapshot_033/")
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 571, in __init__
super(GadgetHDF5Dataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 303, in __init__
super(GadgetDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/frontends/sph/data_structures.py", line 34, in __init__
super(SPHDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/data_objects/static_output.py", line 1743, in __init__
super(ParticleDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/data_objects/static_output.py", line 256, in __init__
self._parse_parameter_file()
File "/home/xarth/codes/xarthisius/yt/yt/frontends/owls/data_structures.py", line 19, in _parse_parameter_file
hvals = self._get_hvals()
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 583, in _get_hvals
handle = h5py.File(self.parameter_filename, mode="r")
File "/home/xarth/.local/lib/python3.8/site-packages/h5py/_hl/files.py", line 406, in __init__
fid = make_fid(name, mode, userblock_size,
File "/home/xarth/.local/lib/python3.8/site-packages/h5py/_hl/files.py", line 173, in make_fid
fid = h5f.open(name, flags, fapl=fapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5f.pyx", line 88, in h5py.h5f.open
OSError: Unable to open file (file signature not found)
|
OSError
|
def _is_valid(self, *args, **kwargs):
need_groups = ["Header"]
veto_groups = ["FOF", "Group", "Subhalo"]
valid = True
valid_fname = args[0]
# If passed arg is a directory, look for the .0 file in that dir
if os.path.isdir(args[0]):
valid_files = []
for f in os.listdir(args[0]):
fname = os.path.join(args[0], f)
fext = os.path.splitext(fname)[-1]
if (
(".0" in f)
and (fext not in {".ewah", ".kdtree"})
and os.path.isfile(fname)
):
valid_files.append(fname)
if len(valid_files) == 0:
valid = False
elif len(valid_files) > 1:
valid = False
else:
valid_fname = valid_files[0]
try:
fh = h5py.File(valid_fname, mode="r")
valid = all(ng in fh["/"] for ng in need_groups) and not any(
vg in fh["/"] for vg in veto_groups
)
dmetal = "/PartType0/Metallicity"
if dmetal not in fh or fh[dmetal].shape[1] not in (11, 17):
valid = False
fh.close()
except Exception:
valid = False
pass
return valid
|
def _is_valid(self, *args, **kwargs):
need_groups = ["Header"]
veto_groups = ["FOF", "Group", "Subhalo"]
valid = True
valid_fname = args[0]
# If passed arg is a directory, look for the .0 file in that dir
if os.path.isdir(args[0]):
valid_files = []
for f in os.listdir(args[0]):
fname = os.path.join(args[0], f)
if (".0" in f) and (".ewah" not in f) and os.path.isfile(fname):
valid_files.append(fname)
if len(valid_files) == 0:
valid = False
elif len(valid_files) > 1:
valid = False
else:
valid_fname = valid_files[0]
try:
fh = h5py.File(valid_fname, mode="r")
valid = all(ng in fh["/"] for ng in need_groups) and not any(
vg in fh["/"] for vg in veto_groups
)
dmetal = "/PartType0/Metallicity"
if dmetal not in fh or fh[dmetal].shape[1] not in (11, 17):
valid = False
fh.close()
except Exception:
valid = False
pass
return valid
|
https://github.com/yt-project/yt/issues/2778
|
Traceback (most recent call last):
File "ala.py", line 2, in <module>
ds = OWLSDataset("/home/xarth/codes/yt_data/snapshot_033/")
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 571, in __init__
super(GadgetHDF5Dataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 303, in __init__
super(GadgetDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/frontends/sph/data_structures.py", line 34, in __init__
super(SPHDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/data_objects/static_output.py", line 1743, in __init__
super(ParticleDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/data_objects/static_output.py", line 256, in __init__
self._parse_parameter_file()
File "/home/xarth/codes/xarthisius/yt/yt/frontends/owls/data_structures.py", line 19, in _parse_parameter_file
hvals = self._get_hvals()
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 583, in _get_hvals
handle = h5py.File(self.parameter_filename, mode="r")
File "/home/xarth/.local/lib/python3.8/site-packages/h5py/_hl/files.py", line 406, in __init__
fid = make_fid(name, mode, userblock_size,
File "/home/xarth/.local/lib/python3.8/site-packages/h5py/_hl/files.py", line 173, in make_fid
fid = h5f.open(name, flags, fapl=fapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5f.pyx", line 88, in h5py.h5f.open
OSError: Unable to open file (file signature not found)
|
OSError
|
def _is_valid(self, *args, **kwargs):
need_groups = ["Constants", "Header", "Parameters", "Units"]
veto_groups = [
"SUBFIND",
"FOF",
"PartType0/ChemistryAbundances",
"PartType0/ChemicalAbundances",
"RuntimePars",
"HashTable",
]
valid = True
valid_fname = args[0]
# If passed arg is a directory, look for the .0 file in that dir
if os.path.isdir(args[0]):
valid_files = []
for f in os.listdir(args[0]):
fname = os.path.join(args[0], f)
fext = os.path.splitext(fname)[-1]
if (
(".0" in f)
and (fext not in {".ewah", ".kdtree"})
and os.path.isfile(fname)
):
valid_files.append(fname)
if len(valid_files) == 0:
valid = False
elif len(valid_files) > 1:
valid = False
else:
valid_fname = valid_files[0]
try:
fileh = h5py.File(valid_fname, mode="r")
for ng in need_groups:
if ng not in fileh["/"]:
valid = False
for vg in veto_groups:
if vg in fileh["/"]:
valid = False
fileh.close()
except Exception:
valid = False
pass
return valid
|
def _is_valid(self, *args, **kwargs):
need_groups = ["Constants", "Header", "Parameters", "Units"]
veto_groups = [
"SUBFIND",
"FOF",
"PartType0/ChemistryAbundances",
"PartType0/ChemicalAbundances",
"RuntimePars",
"HashTable",
]
valid = True
valid_fname = args[0]
# If passed arg is a directory, look for the .0 file in that dir
if os.path.isdir(args[0]):
valid_files = []
for f in os.listdir(args[0]):
fname = os.path.join(args[0], f)
if (".0" in f) and (".ewah" not in f) and os.path.isfile(fname):
valid_files.append(fname)
if len(valid_files) == 0:
valid = False
elif len(valid_files) > 1:
valid = False
else:
valid_fname = valid_files[0]
try:
fileh = h5py.File(valid_fname, mode="r")
for ng in need_groups:
if ng not in fileh["/"]:
valid = False
for vg in veto_groups:
if vg in fileh["/"]:
valid = False
fileh.close()
except Exception:
valid = False
pass
return valid
|
https://github.com/yt-project/yt/issues/2778
|
Traceback (most recent call last):
File "ala.py", line 2, in <module>
ds = OWLSDataset("/home/xarth/codes/yt_data/snapshot_033/")
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 571, in __init__
super(GadgetHDF5Dataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 303, in __init__
super(GadgetDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/frontends/sph/data_structures.py", line 34, in __init__
super(SPHDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/data_objects/static_output.py", line 1743, in __init__
super(ParticleDataset, self).__init__(
File "/home/xarth/codes/xarthisius/yt/yt/data_objects/static_output.py", line 256, in __init__
self._parse_parameter_file()
File "/home/xarth/codes/xarthisius/yt/yt/frontends/owls/data_structures.py", line 19, in _parse_parameter_file
hvals = self._get_hvals()
File "/home/xarth/codes/xarthisius/yt/yt/frontends/gadget/data_structures.py", line 583, in _get_hvals
handle = h5py.File(self.parameter_filename, mode="r")
File "/home/xarth/.local/lib/python3.8/site-packages/h5py/_hl/files.py", line 406, in __init__
fid = make_fid(name, mode, userblock_size,
File "/home/xarth/.local/lib/python3.8/site-packages/h5py/_hl/files.py", line 173, in make_fid
fid = h5f.open(name, flags, fapl=fapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5f.pyx", line 88, in h5py.h5f.open
OSError: Unable to open file (file signature not found)
|
OSError
|
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info["units"] = str(self.units)
info["unit_registry"] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = "array_data"
f = h5py.File(filename, mode="a")
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
|
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info["units"] = str(self.units)
info["unit_registry"] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = "array_data"
f = h5py.File(filename, "w")
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
|
https://github.com/yt-project/yt/issues/2642
|
Traceback (most recent call last):
File "/tmp/ala.py", line 8, in <module>
read_dens = YTArray.from_hdf5("my_data.h5", dataset_name="density")
File "/home/xarth/codes/xarthisius/yt/yt/units/yt_array.py", line 1019, in from_hdf5
dataset = g[dataset_name]
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "/home/xarth/.local/lib/python3.6/site-packages/h5py/_hl/group.py", line 264, in __getitem__
oid = h5o.open(self.id, self._e(name), lapl=self._lapl)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5o.pyx", line 190, in h5py.h5o.open
KeyError: "Unable to open object (object 'density' doesn't exist)"
|
KeyError
|
def get_time_series(
self,
time_data=True,
redshift_data=True,
initial_time=None,
final_time=None,
initial_redshift=None,
final_redshift=None,
initial_cycle=None,
final_cycle=None,
times=None,
redshifts=None,
tolerance=None,
parallel=True,
setup_function=None,
):
"""
Instantiate a DatasetSeries object for a set of outputs.
If no additional keywords given, a DatasetSeries object will be
created with all potential datasets created by the simulation.
Outputs can be gather by specifying a time or redshift range
(or combination of time and redshift), with a specific list of
times or redshifts, a range of cycle numbers (for cycle based
output), or by simply searching all subdirectories within the
simulation directory.
time_data : bool
Whether or not to include time outputs when gathering
datasets for time series.
Default: True.
redshift_data : bool
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
initial_time : tuple of type (float, str)
The earliest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (5.0, "Gyr"). If None, the initial time of the
simulation is used. This can be used in combination with
either final_time or final_redshift.
Default: None.
final_time : tuple of type (float, str)
The latest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (13.7, "Gyr"). If None, the final time of the
simulation is used. This can be used in combination with either
initial_time or initial_redshift.
Default: None.
times : tuple of type (float array, str)
A list of times for which outputs will be found and the units
of those values. For example, ([0, 1, 2, 3], "s").
Default: None.
initial_redshift : float
The earliest redshift for outputs to be included. If None,
the initial redshift of the simulation is used. This can be
used in combination with either final_time or
final_redshift.
Default: None.
final_redshift : float
The latest redshift for outputs to be included. If None,
the final redshift of the simulation is used. This can be
used in combination with either initial_time or
initial_redshift.
Default: None.
redshifts : array_like
A list of redshifts for which outputs will be found.
Default: None.
initial_cycle : float
The earliest cycle for outputs to be included. If None,
the initial cycle of the simulation is used. This can
only be used with final_cycle.
Default: None.
final_cycle : float
The latest cycle for outputs to be included. If None,
the final cycle of the simulation is used. This can
only be used in combination with initial_cycle.
Default: None.
tolerance : float
Used in combination with "times" or "redshifts" keywords,
this is the tolerance within which outputs are accepted
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
parallel : bool/int
If True, the generated DatasetSeries will divide the work
such that a single processor works on each dataset. If an
integer is supplied, the work will be divided into that
number of jobs.
Default: True.
setup_function : callable, accepts a ds
This function will be called whenever a dataset is loaded.
Examples
--------
>>> import yt
>>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
>>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
redshift_data=False)
>>> for ds in es:
... print(ds.current_time)
>>> es.get_time_series(redshifts=[3, 2, 1, 0])
>>> for ds in es:
... print(ds.current_time)
"""
if (
initial_redshift is not None or final_redshift is not None
) and not self.cosmological_simulation:
raise InvalidSimulationTimeSeries(
"An initial or final redshift has been given for a "
+ "noncosmological simulation."
)
if time_data and redshift_data:
my_all_outputs = self.all_outputs
elif time_data:
my_all_outputs = self.all_time_outputs
elif redshift_data:
my_all_outputs = self.all_redshift_outputs
else:
raise InvalidSimulationTimeSeries("Both time_data and redshift_data are False.")
if not my_all_outputs:
DatasetSeries.__init__(self, outputs=[], parallel=parallel)
mylog.info("0 outputs loaded into time series.")
return
# Apply selection criteria to the set.
if times is not None:
my_outputs = self._get_outputs_by_key(
"time", times, tolerance=tolerance, outputs=my_all_outputs
)
elif redshifts is not None:
my_outputs = self._get_outputs_by_key(
"redshift", redshifts, tolerance=tolerance, outputs=my_all_outputs
)
elif initial_cycle is not None or final_cycle is not None:
if initial_cycle is None:
initial_cycle = 0
else:
initial_cycle = max(initial_cycle, 0)
if final_cycle is None:
final_cycle = self.parameters["StopCycle"]
else:
final_cycle = min(final_cycle, self.parameters["StopCycle"])
my_outputs = my_all_outputs[
int(ceil(float(initial_cycle) / self.parameters["CycleSkipDataDump"])) : (
final_cycle / self.parameters["CycleSkipDataDump"]
)
+ 1
]
else:
if initial_time is not None:
if isinstance(initial_time, float):
my_initial_time = self.quan(initial_time, "code_time")
elif isinstance(initial_time, tuple) and len(initial_time) == 2:
my_initial_time = self.quan(*initial_time)
elif not isinstance(initial_time, YTArray):
raise RuntimeError(
"Error: initial_time must be given as a float or "
+ "tuple of (value, units)."
)
elif initial_redshift is not None:
my_initial_time = self.cosmology.t_from_z(initial_redshift)
else:
my_initial_time = self.initial_time
if final_time is not None:
if isinstance(final_time, float):
my_final_time = self.quan(final_time, "code_time")
elif isinstance(final_time, tuple) and len(final_time) == 2:
my_final_time = self.quan(*final_time)
elif not isinstance(final_time, YTArray):
raise RuntimeError(
"Error: final_time must be given as a float or "
+ "tuple of (value, units)."
)
elif final_redshift is not None:
my_final_time = self.cosmology.t_from_z(final_redshift)
else:
my_final_time = self.final_time
my_initial_time.convert_to_units("s")
my_final_time.convert_to_units("s")
my_times = np.array([a["time"] for a in my_all_outputs])
my_indices = np.digitize([my_initial_time, my_final_time], my_times)
if my_initial_time == my_times[my_indices[0] - 1]:
my_indices[0] -= 1
my_outputs = my_all_outputs[my_indices[0] : my_indices[1]]
init_outputs = []
for output in my_outputs:
if os.path.exists(output["filename"]):
init_outputs.append(output["filename"])
DatasetSeries.__init__(
self, outputs=init_outputs, parallel=parallel, setup_function=setup_function
)
mylog.info("%d outputs loaded into time series.", len(init_outputs))
|
def get_time_series(
self,
time_data=True,
redshift_data=True,
initial_time=None,
final_time=None,
initial_redshift=None,
final_redshift=None,
initial_cycle=None,
final_cycle=None,
times=None,
redshifts=None,
tolerance=None,
parallel=True,
setup_function=None,
):
"""
Instantiate a DatasetSeries object for a set of outputs.
If no additional keywords given, a DatasetSeries object will be
created with all potential datasets created by the simulation.
Outputs can be gather by specifying a time or redshift range
(or combination of time and redshift), with a specific list of
times or redshifts, a range of cycle numbers (for cycle based
output), or by simply searching all subdirectories within the
simulation directory.
time_data : bool
Whether or not to include time outputs when gathering
datasets for time series.
Default: True.
redshift_data : bool
Whether or not to include redshift outputs when gathering
datasets for time series.
Default: True.
initial_time : tuple of type (float, str)
The earliest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (5.0, "Gyr"). If None, the initial time of the
simulation is used. This can be used in combination with
either final_time or final_redshift.
Default: None.
final_time : tuple of type (float, str)
The latest time for outputs to be included. This should be
given as the value and the string representation of the units.
For example, (13.7, "Gyr"). If None, the final time of the
simulation is used. This can be used in combination with either
initial_time or initial_redshift.
Default: None.
times : tuple of type (float array, str)
A list of times for which outputs will be found and the units
of those values. For example, ([0, 1, 2, 3], "s").
Default: None.
initial_redshift : float
The earliest redshift for outputs to be included. If None,
the initial redshift of the simulation is used. This can be
used in combination with either final_time or
final_redshift.
Default: None.
final_redshift : float
The latest redshift for outputs to be included. If None,
the final redshift of the simulation is used. This can be
used in combination with either initial_time or
initial_redshift.
Default: None.
redshifts : array_like
A list of redshifts for which outputs will be found.
Default: None.
initial_cycle : float
The earliest cycle for outputs to be included. If None,
the initial cycle of the simulation is used. This can
only be used with final_cycle.
Default: None.
final_cycle : float
The latest cycle for outputs to be included. If None,
the final cycle of the simulation is used. This can
only be used in combination with initial_cycle.
Default: None.
tolerance : float
Used in combination with "times" or "redshifts" keywords,
this is the tolerance within which outputs are accepted
given the requested times or redshifts. If None, the
nearest output is always taken.
Default: None.
parallel : bool/int
If True, the generated DatasetSeries will divide the work
such that a single processor works on each dataset. If an
integer is supplied, the work will be divided into that
number of jobs.
Default: True.
setup_function : callable, accepts a ds
This function will be called whenever a dataset is loaded.
Examples
--------
>>> import yt
>>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
>>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
redshift_data=False)
>>> for ds in es:
... print(ds.current_time)
>>> es.get_time_series(redshifts=[3, 2, 1, 0])
>>> for ds in es:
... print(ds.current_time)
"""
if (
initial_redshift is not None or final_redshift is not None
) and not self.cosmological_simulation:
raise InvalidSimulationTimeSeries(
"An initial or final redshift has been given for a "
+ "noncosmological simulation."
)
if time_data and redshift_data:
my_all_outputs = self.all_outputs
elif time_data:
my_all_outputs = self.all_time_outputs
elif redshift_data:
my_all_outputs = self.all_redshift_outputs
else:
raise InvalidSimulationTimeSeries("Both time_data and redshift_data are False.")
if not my_all_outputs:
DatasetSeries.__init__(self, outputs=[], parallel=parallel)
mylog.info("0 outputs loaded into time series.")
return
# Apply selection criteria to the set.
if times is not None:
my_outputs = self._get_outputs_by_key(
"time", times, tolerance=tolerance, outputs=my_all_outputs
)
elif redshifts is not None:
my_outputs = self._get_outputs_by_key(
"redshift", redshifts, tolerance=tolerance, outputs=my_all_outputs
)
elif initial_cycle is not None or final_cycle is not None:
if initial_cycle is None:
initial_cycle = 0
else:
initial_cycle = max(initial_cycle, 0)
if final_cycle is None:
final_cycle = self.parameters["StopCycle"]
else:
final_cycle = min(final_cycle, self.parameters["StopCycle"])
my_outputs = my_all_outputs[
int(ceil(float(initial_cycle) / self.parameters["CycleSkipDataDump"])) : (
final_cycle / self.parameters["CycleSkipDataDump"]
)
+ 1
]
else:
if initial_time is not None:
if isinstance(initial_time, float):
initial_time = self.quan(initial_time, "code_time")
elif isinstance(initial_time, tuple) and len(initial_time) == 2:
initial_time = self.quan(*initial_time)
elif not isinstance(initial_time, YTArray):
raise RuntimeError(
"Error: initial_time must be given as a float or "
+ "tuple of (value, units)."
)
elif initial_redshift is not None:
my_initial_time = self.cosmology.t_from_z(initial_redshift)
else:
my_initial_time = self.initial_time
if final_time is not None:
if isinstance(final_time, float):
final_time = self.quan(final_time, "code_time")
elif isinstance(final_time, tuple) and len(final_time) == 2:
final_time = self.quan(*final_time)
elif not isinstance(final_time, YTArray):
raise RuntimeError(
"Error: final_time must be given as a float or "
+ "tuple of (value, units)."
)
my_final_time = final_time.in_units("s")
elif final_redshift is not None:
my_final_time = self.cosmology.t_from_z(final_redshift)
else:
my_final_time = self.final_time
my_initial_time.convert_to_units("s")
my_final_time.convert_to_units("s")
my_times = np.array([a["time"] for a in my_all_outputs])
my_indices = np.digitize([my_initial_time, my_final_time], my_times)
if my_initial_time == my_times[my_indices[0] - 1]:
my_indices[0] -= 1
my_outputs = my_all_outputs[my_indices[0] : my_indices[1]]
init_outputs = []
for output in my_outputs:
if os.path.exists(output["filename"]):
init_outputs.append(output["filename"])
DatasetSeries.__init__(
self, outputs=init_outputs, parallel=parallel, setup_function=setup_function
)
mylog.info("%d outputs loaded into time series.", len(init_outputs))
|
https://github.com/yt-project/yt/issues/2578
|
Traceback (most recent call last):
File "bug.py", line 4, in <module>
es.get_time_series(initial_time=(10.0, "Gyr"), final_time=(13.7, "Gyr"))
File "yt/frontends/enzo/simulation_handling.py", line 282, in get_time_series
my_initial_time.convert_to_units("s")
UnboundLocalError: local variable 'my_initial_time' referenced before assignment
|
UnboundLocalError
|
def box(self, left_edge, right_edge, **kwargs):
"""
box is a wrapper to the Region object for creating a region
without having to specify a *center* value. It assumes the center
is the midpoint between the left_edge and right_edge.
"""
# we handle units in the region data object
# but need to check if left_edge or right_edge is a
# list or other non-array iterable before calculating
# the center
if isinstance(left_edge[0], YTQuantity):
left_edge = YTArray(left_edge)
right_edge = YTArray(right_edge)
left_edge = np.asanyarray(left_edge, dtype="float64")
right_edge = np.asanyarray(right_edge, dtype="float64")
c = (left_edge + right_edge) / 2.0
return self.region(c, left_edge, right_edge, **kwargs)
|
def box(self, left_edge, right_edge, **kwargs):
"""
box is a wrapper to the Region object for creating a region
without having to specify a *center* value. It assumes the center
is the midpoint between the left_edge and right_edge.
"""
# we handle units in the region data object
# but need to check if left_edge or right_edge is a
# list or other non-array iterable before calculating
# the center
if not isinstance(left_edge, np.ndarray):
left_edge = np.array(left_edge, dtype="float64")
if not isinstance(right_edge, np.ndarray):
right_edge = np.array(right_edge, dtype="float64")
c = (left_edge + right_edge) / 2.0
return self.region(c, left_edge, right_edge, **kwargs)
|
https://github.com/yt-project/yt/issues/2560
|
$ python3 test_region.py
yt : [INFO ] 2020-04-25 14:16:34,930 Parameters: current_time = 7.24422756567562
yt : [INFO ] 2020-04-25 14:16:34,930 Parameters: domain_dimensions = [512 512 512]
yt : [INFO ] 2020-04-25 14:16:34,930 Parameters: domain_left_edge = [-5.12e+09 -5.12e+09 -5.12e+09]
yt : [INFO ] 2020-04-25 14:16:34,930 Parameters: domain_right_edge = [5.12e+09 5.12e+09 5.12e+09]
YTRegion (smallplt00595): , center=[-2.56e+09 0.00e+00 0.00e+00] cm, left_edge=[-5.12e+09 -5.12e+09 -5.12e+09] cm, right_edge=[0.00e+00 5.12e+09 5.12e+09] cm
Traceback (most recent call last):
File "test_region.py", line 22, in <module>
region2 = ds.box(ll, rr)
File "/raid/zingale/development/yt/yt/data_objects/static_output.py", line 854, in box
c = (left_edge + right_edge)/2.0
File "/raid/zingale/development/yt/yt/units/yt_array.py", line 1414, in __array_ufunc__
inps, units, ufunc, ret_class)
File "/raid/zingale/development/yt/yt/units/yt_array.py", line 174, in handle_preserve_units
raise YTUnitOperationError(ufunc, *units)
yt.utilities.exceptions.YTUnitOperationError: The <ufunc 'add'> operator for YTArrays with units (code_length) and (dimensionless) is not well defined.
|
yt.utilities.exceptions.YTUnitOperationError
|
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if "out" in kwargs:
out_orig = kwargs.pop("out")
if ufunc in multiple_output_operators:
outs = []
for arr in out_orig:
outs.append(arr.view(np.ndarray))
out = tuple(outs)
else:
out_element = out_orig[0]
if out_element.dtype.kind in ("u", "i"):
new_dtype = "f" + str(out_element.dtype.itemsize)
float_values = out_element.astype(new_dtype)
out_element.dtype = new_dtype
np.copyto(out_element, float_values)
out = out_element.view(np.ndarray)
else:
if ufunc in multiple_output_operators:
num_outputs = multiple_output_operators[ufunc]
out = (None,) * num_outputs
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == "reduce":
power_sign = POWER_SIGN_MAPPING[ufunc]
if "axis" in kwargs and kwargs["axis"] is not None:
unit = u ** (power_sign * inp.shape[kwargs["axis"]])
else:
unit = u ** (power_sign * inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(unit, units, out, out_arr)
else:
if ufunc is clip:
inp = []
for i in inputs:
if isinstance(i, YTArray):
inp.append(i.to(inputs[0].units).view(np.ndarray))
elif iterable(i):
inp.append(np.asarray(i))
else:
inp.append(i)
if out is not None:
_out = out.view(np.ndarray)
else:
_out = None
out_arr = ufunc(*inp, out=_out)
unit = inputs[0].units
ret_class = type(inputs[0])
# This was added after unyt was spun out, but is not presently used:
# mul = 1
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been "
"added to unyt_array." % (str(ufunc), len(inputs))
)
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
if ufunc not in multiple_output_operators:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
|
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if "out" in kwargs:
out_orig = kwargs.pop("out")
out = np.asarray(out_orig[0])
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == "reduce":
power_sign = POWER_SIGN_MAPPING[ufunc]
if "axis" in kwargs and kwargs["axis"] is not None:
unit = u ** (power_sign * inp.shape[kwargs["axis"]])
else:
unit = u ** (power_sign * inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(unit, units, out, out_arr)
else:
if ufunc is clip:
inp = []
for i in inputs:
if isinstance(i, YTArray):
inp.append(i.to(inputs[0].units).view(np.ndarray))
elif iterable(i):
inp.append(np.asarray(i))
else:
inp.append(i)
if out is not None:
_out = out.view(np.ndarray)
else:
_out = None
out_arr = ufunc(*inp, out=_out)
unit = inputs[0].units
ret_class = type(inputs[0])
# This was added after unyt was spun out, but is not presently used:
# mul = 1
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been "
"added to unyt_array." % (str(ufunc), len(inputs))
)
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
|
https://github.com/yt-project/yt/issues/2445
|
Traceback (most recent call last):
3233 File "C:\Miniconda36-x64\envs\test\lib\site-packages\nose\case.py", line 197, in runTest
3234 self.test(*self.arg)
3235 File "C:\projects\yt\yt\units\tests\test_ytarray.py", line 878, in test_ufuncs
3236 unary_ufunc_comparison(ufunc, YTArray([.3, .4, .5], 'cm'))
3237 File "C:\projects\yt\yt\units\tests\test_ytarray.py", line 809, in unary_ufunc_comparison
3238 ret1, ret2 = ufunc(a)
3239 File "C:\projects\yt\yt\units\yt_array.py", line 1378, in __array_ufunc__
3240 out_arr = func(np.asarray(inp), out=out, **kwargs)
3241TypeError: 'out' must be a tuple of arrays
|
3241TypeError
|
def make_colormap(ctuple_list, name=None, interpolate=True):
"""
This generates a custom colormap based on the colors and spacings you
provide. Enter a ctuple_list, which consists of tuples of (color, spacing)
to return a colormap appropriate for use in yt. If you specify a
name, it will automatically be added to the current session as a valid
colormap.
Output colormap is in the format yt expects for adding a colormap to the
current session: a dictionary with the appropriate RGB channels each
consisting of a 256x3 array :
First number is the number at which we are defining a color breakpoint
Second number is the (0..1) number to interpolate to when coming *from below*
Third number is the (0..1) number to interpolate to when coming *from above*
Parameters
----------
ctuple_list: list of (color, float) tuples
The ctuple_list consists of pairs of (color, interval) tuples
identifying the colors to use in the colormap and the intervals
they take to change to the next color in the list. A color can
either be a string of the name of a color, or it can be an array
of 3 floats, each representing the intensity of R, G, and B on
a scale of 0 to 1. Valid color names and their equivalent
arrays are listed below.
Any interval can be given for the different color tuples, and
the total of all the intervals will be scaled to the 256 output
elements.
If a ctuple_list ends with a color and a non-zero interval,
a white 0-interval would be added to the end to finish the
interpolation. To avoid finishing with white, specify your own
zero-interval color at the end.
name: string, optional
If you wish this colormap to be added as a valid colormap to the
current session, specify a name here. Default: None
interpolation: boolean, optional
Designates whether or not the colormap will interpolate between
the colors provided or just give solid colors across the intervals.
Default: True
Preset Color Options
--------------------
'white' : np.array([255, 255, 255 ])/255.
'gray' : np.array([130, 130, 130])/255.
'dgray' : np.array([80, 80, 80])/255.
'black' : np.array([0, 0, 0])/255.
'blue' : np.array([0, 0, 255])/255.
'dblue' : np.array([0, 0, 160])/255.
'purple' : np.array([100, 0, 200])/255.
'dpurple' : np.array([66, 0, 133])/255.
'dred' : np.array([160, 0, 0])/255.
'red' : np.array([255, 0, 0])/255.
'orange' : np.array([255, 128, 0])/255.
'dorange' : np.array([200,100, 0])/255.
'yellow' : np.array([255, 255, 0])/255.
'dyellow' : np.array([200, 200, 0])/255.
'green' : np.array([0, 255, 0])/255.
'dgreen' : np.array([0, 160, 0])/255.
Examples
--------
To obtain a colormap that starts at black with equal intervals in green,
blue, red, yellow in that order and interpolation between those colors.
(In reality, it starts at black, takes an interval of 10 to interpolate to
green, then an interval of 10 to interpolate to blue, then an interval of
10 to interpolate to red.)
>>> cm = make_colormap([('black', 10), ('green', 10), ('blue', 10),
... ('red', 0)])
To add a colormap that has five equal blocks of solid major colors to
the current session as "steps":
>>> make_colormap([('red', 10), ('orange', 10), ('yellow', 10),
... ('green', 10), ('blue', 10)], name="steps",
... interpolate=False)
To add a colormap that looks like the French flag (i.e. equal bands of
blue, white, and red) using your own RGB keys, then to display it:
>>> make_colormap([([0,0,1], 10), ([1,1,1], 10), ([1,0,0], 10)],
... name='french_flag', interpolate=False)
>>> show_colormaps(['french_flag'])
"""
# aliases for different colors
color_dict = {
"white": np.array([255, 255, 255]) / 255.0,
"gray": np.array([130, 130, 130]) / 255.0,
"dgray": np.array([80, 80, 80]) / 255.0,
"black": np.array([0, 0, 0]) / 255.0,
"blue": np.array([0, 0, 255]) / 255.0,
"dblue": np.array([0, 0, 160]) / 255.0,
"purple": np.array([100, 0, 200]) / 255.0,
"dpurple": np.array([66, 0, 133]) / 255.0,
"dred": np.array([160, 0, 0]) / 255.0,
"red": np.array([255, 0, 0]) / 255.0,
"orange": np.array([255, 128, 0]) / 255.0,
"dorange": np.array([200, 100, 0]) / 255.0,
"yellow": np.array([255, 255, 0]) / 255.0,
"dyellow": np.array([200, 200, 0]) / 255.0,
"green": np.array([0, 255, 0]) / 255.0,
"dgreen": np.array([0, 160, 0]) / 255.0,
}
cmap = np.zeros((256, 3))
# If the user provides a list with a non-zero final interval, it
# doesn't make sense because you have an interval but no final
# color to which it interpolates. So provide a 0-length white final
# interval to end the previous interval in white.
if ctuple_list[-1][1] != 0:
ctuple_list.append(("white", 0))
# Figure out how many intervals there are total.
rolling_index = 0
for i, (color, interval) in enumerate(ctuple_list):
if isinstance(color, string_types):
ctuple_list[i] = (color_dict[color], interval)
rolling_index += interval
scale = 256.0 / rolling_index
n = len(ctuple_list)
# Step through each ctuple and interpolate from one color to the
# next over the interval provided
rolling_index = 0
for i in range(n - 1):
color, interval = ctuple_list[i]
interval *= scale
next_index = rolling_index + interval
next_color, next_interval = ctuple_list[i + 1]
if not interpolate:
next_color = color
# Interpolate the R, G, and B channels from one color to the next
# Use np.round to make sure you're on a discrete index
interval = int(np.round(next_index) - np.round(rolling_index))
for j in np.arange(3):
cmap[int(np.rint(rolling_index)) : int(np.rint(next_index)), j] = (
np.linspace(color[j], next_color[j], num=interval)
)
rolling_index = next_index
# Return a dictionary with the appropriate RGB channels each consisting of
# a 256x3 array in the format that is expected by add_cmap() to add a
# colormap to the session.
# The format is as follows:
# First number is the number at which we are defining a color breakpoint
# Second number is the (0..1) number to interpolate to when coming *from below*
# Third number is the (0..1) number to interpolate to when coming *from above*
_vs = np.linspace(0, 1, 256)
cdict = {
"red": np.transpose([_vs, cmap[:, 0], cmap[:, 0]]),
"green": np.transpose([_vs, cmap[:, 1], cmap[:, 1]]),
"blue": np.transpose([_vs, cmap[:, 2], cmap[:, 2]]),
}
if name is not None:
add_cmap(name, cdict)
return cdict
|
def make_colormap(ctuple_list, name=None, interpolate=True):
"""
This generates a custom colormap based on the colors and spacings you
provide. Enter a ctuple_list, which consists of tuples of (color, spacing)
to return a colormap appropriate for use in yt. If you specify a
name, it will automatically be added to the current session as a valid
colormap.
Output colormap is in the format yt expects for adding a colormap to the
current session: a dictionary with the appropriate RGB channels each
consisting of a 256x3 array :
First number is the number at which we are defining a color breakpoint
Second number is the (0..1) number to interpolate to when coming *from below*
Third number is the (0..1) number to interpolate to when coming *from above*
Parameters
----------
ctuple_list: list of (color, float) tuples
The ctuple_list consists of pairs of (color, interval) tuples
identifying the colors to use in the colormap and the intervals
they take to change to the next color in the list. A color can
either be a string of the name of a color, or it can be an array
of 3 floats, each representing the intensity of R, G, and B on
a scale of 0 to 1. Valid color names and their equivalent
arrays are listed below.
Any interval can be given for the different color tuples, and
the total of all the intervals will be scaled to the 256 output
elements.
If a ctuple_list ends with a color and a non-zero interval,
a white 0-interval would be added to the end to finish the
interpolation. To avoid finishing with white, specify your own
zero-interval color at the end.
name: string, optional
If you wish this colormap to be added as a valid colormap to the
current session, specify a name here. Default: None
interpolation: boolean, optional
Designates whether or not the colormap will interpolate between
the colors provided or just give solid colors across the intervals.
Default: True
Preset Color Options
--------------------
'white' : np.array([255, 255, 255 ])/255.
'gray' : np.array([130, 130, 130])/255.
'dgray' : np.array([80, 80, 80])/255.
'black' : np.array([0, 0, 0])/255.
'blue' : np.array([0, 0, 255])/255.
'dblue' : np.array([0, 0, 160])/255.
'purple' : np.array([100, 0, 200])/255.
'dpurple' : np.array([66, 0, 133])/255.
'dred' : np.array([160, 0, 0])/255.
'red' : np.array([255, 0, 0])/255.
'orange' : np.array([255, 128, 0])/255.
'dorange' : np.array([200,100, 0])/255.
'yellow' : np.array([255, 255, 0])/255.
'dyellow' : np.array([200, 200, 0])/255.
'green' : np.array([0, 255, 0])/255.
'dgreen' : np.array([0, 160, 0])/255.
Examples
--------
To obtain a colormap that starts at black with equal intervals in green,
blue, red, yellow in that order and interpolation between those colors.
(In reality, it starts at black, takes an interval of 10 to interpolate to
green, then an interval of 10 to interpolate to blue, then an interval of
10 to interpolate to red.)
>>> cm = make_colormap([('black', 10), ('green', 10), ('blue', 10),
... ('red', 0)])
To add a colormap that has five equal blocks of solid major colors to
the current session as "steps":
>>> make_colormap([('red', 10), ('orange', 10), ('yellow', 10),
... ('green', 10), ('blue', 10)], name="steps",
... interpolate=False)
To add a colormap that looks like the French flag (i.e. equal bands of
blue, white, and red) using your own RGB keys, then to display it:
>>> make_colormap([([0,0,1], 10), ([1,1,1], 10), ([1,0,0], 10)],
... name='french_flag', interpolate=False)
>>> show_colormaps(['french_flag'])
"""
# aliases for different colors
color_dict = {
"white": np.array([255, 255, 255]) / 255.0,
"gray": np.array([130, 130, 130]) / 255.0,
"dgray": np.array([80, 80, 80]) / 255.0,
"black": np.array([0, 0, 0]) / 255.0,
"blue": np.array([0, 0, 255]) / 255.0,
"dblue": np.array([0, 0, 160]) / 255.0,
"purple": np.array([100, 0, 200]) / 255.0,
"dpurple": np.array([66, 0, 133]) / 255.0,
"dred": np.array([160, 0, 0]) / 255.0,
"red": np.array([255, 0, 0]) / 255.0,
"orange": np.array([255, 128, 0]) / 255.0,
"dorange": np.array([200, 100, 0]) / 255.0,
"yellow": np.array([255, 255, 0]) / 255.0,
"dyellow": np.array([200, 200, 0]) / 255.0,
"green": np.array([0, 255, 0]) / 255.0,
"dgreen": np.array([0, 160, 0]) / 255.0,
}
cmap = np.zeros((256, 3))
# If the user provides a list with a non-zero final interval, it
# doesn't make sense because you have an interval but no final
# color to which it interpolates. So provide a 0-length white final
# interval to end the previous interval in white.
if ctuple_list[-1][1] != 0:
ctuple_list.append(("white", 0))
# Figure out how many intervals there are total.
rolling_index = 0
for i, (color, interval) in enumerate(ctuple_list):
if isinstance(color, string_types):
ctuple_list[i] = (color_dict[color], interval)
rolling_index += interval
scale = 256.0 / rolling_index
n = len(ctuple_list)
# Step through each ctuple and interpolate from one color to the
# next over the interval provided
rolling_index = 0
for i in range(n - 1):
color, interval = ctuple_list[i]
interval *= scale
next_index = rolling_index + interval
next_color, next_interval = ctuple_list[i + 1]
if not interpolate:
next_color = color
# Interpolate the R, G, and B channels from one color to the next
# Use np.round to make sure you're on a discrete index
interval = np.round(next_index) - np.round(rolling_index)
for j in np.arange(3):
cmap[int(np.rint(rolling_index)) : int(np.rint(next_index)), j] = (
np.linspace(color[j], next_color[j], interval)
)
rolling_index = next_index
# Return a dictionary with the appropriate RGB channels each consisting of
# a 256x3 array in the format that is expected by add_cmap() to add a
# colormap to the session.
# The format is as follows:
# First number is the number at which we are defining a color breakpoint
# Second number is the (0..1) number to interpolate to when coming *from below*
# Third number is the (0..1) number to interpolate to when coming *from above*
_vs = np.linspace(0, 1, 256)
cdict = {
"red": np.transpose([_vs, cmap[:, 0], cmap[:, 0]]),
"green": np.transpose([_vs, cmap[:, 1], cmap[:, 1]]),
"blue": np.transpose([_vs, cmap[:, 2], cmap[:, 2]]),
}
if name is not None:
add_cmap(name, cdict)
return cdict
|
https://github.com/yt-project/yt/issues/2445
|
Traceback (most recent call last):
3233 File "C:\Miniconda36-x64\envs\test\lib\site-packages\nose\case.py", line 197, in runTest
3234 self.test(*self.arg)
3235 File "C:\projects\yt\yt\units\tests\test_ytarray.py", line 878, in test_ufuncs
3236 unary_ufunc_comparison(ufunc, YTArray([.3, .4, .5], 'cm'))
3237 File "C:\projects\yt\yt\units\tests\test_ytarray.py", line 809, in unary_ufunc_comparison
3238 ret1, ret2 = ufunc(a)
3239 File "C:\projects\yt\yt\units\yt_array.py", line 1378, in __array_ufunc__
3240 out_arr = func(np.asarray(inp), out=out, **kwargs)
3241TypeError: 'out' must be a tuple of arrays
|
3241TypeError
|
def __init__(
self, data_source, conditionals, ds=None, field_parameters=None, base_object=None
):
validate_object(data_source, YTSelectionContainer)
validate_iterable(conditionals)
for condition in conditionals:
validate_object(condition, string_types)
validate_object(ds, Dataset)
validate_object(field_parameters, dict)
validate_object(base_object, YTSelectionContainer)
if base_object is not None:
# passing base_object explicitly has been deprecated,
# but we handle it here for backward compatibility
if data_source is not None:
raise RuntimeError("Cannot use both base_object and data_source")
data_source = base_object
super(YTCutRegion, self).__init__(
data_source.center, ds, field_parameters, data_source=data_source
)
self.conditionals = ensure_list(conditionals)
self.base_object = data_source
self._selector = None
|
def __init__(
self, data_source, conditionals, ds=None, field_parameters=None, base_object=None
):
validate_object(data_source, YTSelectionContainer)
validate_iterable(conditionals)
for condition in conditionals:
validate_object(condition, string_types)
validate_object(ds, Dataset)
validate_object(field_parameters, dict)
validate_object(base_object, YTSelectionContainer)
if base_object is not None:
# passing base_object explicitly has been deprecated,
# but we handle it here for backward compatibility
if data_source is not None:
raise RuntimeError("Cannot use both base_object and data_source")
data_source = base_object
super(YTCutRegion, self).__init__(
data_source.center, ds, field_parameters, data_source=data_source
)
self.conditionals = ensure_list(conditionals)
self.base_object = data_source
self._selector = None
self._particle_mask = {}
|
https://github.com/yt-project/yt/issues/2104
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-83-3704089812fe> in <module>
5 sp = ds.sphere((0.5, 0.5, 0.5), (5, "kpc"))
6 dense_sp = sp.cut_region(['obj["H_p0_number_density"]>= 1e-2'])
----> 7 dense_sp.quantities.angular_momentum_vector()
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in __call__(self, *args, **kwargs)
67 storage = {}
68 for sto, ds in parallel_objects(chunks, -1, storage = storage):
---> 69 sto.result = self.process_chunk(ds, *args, **kwargs)
70 # Now storage will have everything, and will be done via pickling, so
71 # the units will be preserved. (Credit to Nathan for this
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in process_chunk(self, data, use_gas, use_particles, particle_type)
493 rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] *
494 data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \
--> 495 for axis in "xyz"])
496 rvals.append(data[self.particle_type, "particle_mass"].sum(dtype=np.float64))
497 return rvals
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in <listcomp>(.0)
493 rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] *
494 data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \
--> 495 for axis in "xyz"])
496 rvals.append(data[self.particle_type, "particle_mass"].sum(dtype=np.float64))
497 return rvals
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/data_containers.py in __getitem__(self, key)
253 return self.field_data[f]
254 else:
--> 255 self.get_data(f)
256 # fi.units is the unit expression string. We depend on the registry
257 # hanging off the dataset to define this unit object.
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/selection_data_containers.py in get_data(self, fields)
901 parent = getattr(self, "parent", self.base_object)
902 self.field_data[field] = \
--> 903 parent[field][self._part_ind(field[0])]
904 else:
905 self.field_data[field] = self.base_object[field][ind]
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/units/yt_array.py in __getitem__(self, item)
1056
1057 def __getitem__(self, item):
-> 1058 ret = super(YTArray, self).__getitem__(item)
1059 if ret.shape == ():
1060 return YTQuantity(ret, self.units, bypass_validation=True)
IndexError: boolean index did not match indexed array along dimension 0; dimension is 44461 but corresponding boolean dimension is 3218
|
IndexError
|
def _part_ind(self, ptype):
# If scipy is installed, use the fast KD tree
# implementation. Else, fall back onto the direct
# brute-force algorithm.
try:
_scipy.spatial.KDTree
return self._part_ind_KDTree(ptype)
except ImportError:
return self._part_ind_brute_force(ptype)
|
def _part_ind(self, ptype):
if self._particle_mask.get(ptype) is None:
# If scipy is installed, use the fast KD tree
# implementation. Else, fall back onto the direct
# brute-force algorithm.
try:
_scipy.spatial.KDTree
mask = self._part_ind_KDTree(ptype)
except ImportError:
mask = self._part_ind_brute_force(ptype)
self._particle_mask[ptype] = mask
return self._particle_mask[ptype]
|
https://github.com/yt-project/yt/issues/2104
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-83-3704089812fe> in <module>
5 sp = ds.sphere((0.5, 0.5, 0.5), (5, "kpc"))
6 dense_sp = sp.cut_region(['obj["H_p0_number_density"]>= 1e-2'])
----> 7 dense_sp.quantities.angular_momentum_vector()
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in __call__(self, *args, **kwargs)
67 storage = {}
68 for sto, ds in parallel_objects(chunks, -1, storage = storage):
---> 69 sto.result = self.process_chunk(ds, *args, **kwargs)
70 # Now storage will have everything, and will be done via pickling, so
71 # the units will be preserved. (Credit to Nathan for this
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in process_chunk(self, data, use_gas, use_particles, particle_type)
493 rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] *
494 data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \
--> 495 for axis in "xyz"])
496 rvals.append(data[self.particle_type, "particle_mass"].sum(dtype=np.float64))
497 return rvals
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in <listcomp>(.0)
493 rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] *
494 data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \
--> 495 for axis in "xyz"])
496 rvals.append(data[self.particle_type, "particle_mass"].sum(dtype=np.float64))
497 return rvals
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/data_containers.py in __getitem__(self, key)
253 return self.field_data[f]
254 else:
--> 255 self.get_data(f)
256 # fi.units is the unit expression string. We depend on the registry
257 # hanging off the dataset to define this unit object.
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/selection_data_containers.py in get_data(self, fields)
901 parent = getattr(self, "parent", self.base_object)
902 self.field_data[field] = \
--> 903 parent[field][self._part_ind(field[0])]
904 else:
905 self.field_data[field] = self.base_object[field][ind]
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/units/yt_array.py in __getitem__(self, item)
1056
1057 def __getitem__(self, item):
-> 1058 ret = super(YTArray, self).__getitem__(item)
1059 if ret.shape == ():
1060 return YTQuantity(ret, self.units, bypass_validation=True)
IndexError: boolean index did not match indexed array along dimension 0; dimension is 44461 but corresponding boolean dimension is 3218
|
IndexError
|
def _get_gadget_format(filename, header_size):
# check and return gadget binary format with file endianness
ff = open(filename, "rb")
(rhead,) = struct.unpack("<I", ff.read(4))
ff.close()
if rhead == _byte_swap_32(8):
return 2, ">"
elif rhead == 8:
return 2, "<"
elif rhead == _byte_swap_32(header_size):
return 1, ">"
elif rhead == header_size:
return 1, "<"
else:
raise RuntimeError("Incorrect Gadget format %s!" % str(rhead))
|
def _get_gadget_format(filename):
# check and return gadget binary format with file endianness
ff = open(filename, "rb")
(rhead,) = struct.unpack("<I", ff.read(4))
ff.close()
if rhead == 134217728:
return 2, ">"
elif rhead == 8:
return 2, "<"
elif rhead == 65536:
return 1, ">"
elif rhead == 256:
return 1, "<"
else:
raise RuntimeError("Incorrect Gadget format %s!" % str(rhead))
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def __init__(self, ds, io, filename, file_id):
gformat = _get_gadget_format(filename, ds._header_size)
with open(filename, "rb") as f:
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
self.header = read_record(f, ds._header_spec, endian=gformat[1])
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
self._position_offset = f.tell()
f.seek(0, os.SEEK_END)
self._file_size = f.tell()
super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id)
|
def __init__(self, ds, io, filename, file_id):
gformat = _get_gadget_format(filename)
with open(filename, "rb") as f:
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
self.header = read_record(f, ds._header_spec, endian=gformat[1])
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
self._position_offset = f.tell()
f.seek(0, os.SEEK_END)
self._file_size = f.tell()
super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id)
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def __init__(
self,
filename,
dataset_type="gadget_binary",
additional_fields=(),
unit_base=None,
n_ref=64,
over_refine_factor=1,
kernel_name=None,
index_ptype="all",
bounding_box=None,
header_spec="default",
field_spec="default",
ptype_spec="default",
units_override=None,
unit_system="cgs",
use_dark_factor=False,
w_0=-1.0,
w_a=0.0,
):
if self._instantiated:
return
self._header_spec = self._setup_binary_spec(header_spec, gadget_header_specs)
self._header_size = _compute_header_size(self._header_spec)
if self._header_size != 256:
only_on_root(
mylog.warn,
"Non-standard header size is detected! "
"Gadget-2 standard header is 256 bytes, but yours is %s. "
"Make sure a non-standard header is actually expected. "
"Otherwise something is wrong, "
"and you might want to check how the dataset is loaded. "
"Futher information about header specification can be found in "
"https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
self._header_size,
)
self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs)
self._ptype_spec = self._setup_binary_spec(ptype_spec, gadget_ptype_specs)
self.index_ptype = index_ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
unit_base["cmcm"] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
if bounding_box is not None:
bbox = np.array(bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
self.domain_left_edge = bbox[:, 0]
self.domain_right_edge = bbox[:, 1]
else:
self.domain_left_edge = self.domain_right_edge = None
if units_override is not None:
raise RuntimeError(
"units_override is not supported for GadgetDataset. "
+ "Use unit_base instead."
)
# Set dark energy parameters before cosmology object is created
self.use_dark_factor = use_dark_factor
self.w_0 = w_0
self.w_a = w_a
super(GadgetDataset, self).__init__(
filename,
dataset_type=dataset_type,
unit_system=unit_system,
n_ref=n_ref,
over_refine_factor=over_refine_factor,
kernel_name=kernel_name,
)
if self.cosmological_simulation:
self.time_unit.convert_to_units("s/h")
self.length_unit.convert_to_units("kpccm/h")
self.mass_unit.convert_to_units("g/h")
else:
self.time_unit.convert_to_units("s")
self.length_unit.convert_to_units("kpc")
self.mass_unit.convert_to_units("Msun")
|
def __init__(
self,
filename,
dataset_type="gadget_binary",
additional_fields=(),
unit_base=None,
n_ref=64,
over_refine_factor=1,
kernel_name=None,
index_ptype="all",
bounding_box=None,
header_spec="default",
field_spec="default",
ptype_spec="default",
units_override=None,
unit_system="cgs",
use_dark_factor=False,
w_0=-1.0,
w_a=0.0,
):
if self._instantiated:
return
self._header_spec = self._setup_binary_spec(header_spec, gadget_header_specs)
self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs)
self._ptype_spec = self._setup_binary_spec(ptype_spec, gadget_ptype_specs)
self.index_ptype = index_ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
unit_base["cmcm"] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
if bounding_box is not None:
bbox = np.array(bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
self.domain_left_edge = bbox[:, 0]
self.domain_right_edge = bbox[:, 1]
else:
self.domain_left_edge = self.domain_right_edge = None
if units_override is not None:
raise RuntimeError(
"units_override is not supported for GadgetDataset. "
+ "Use unit_base instead."
)
# Set dark energy parameters before cosmology object is created
self.use_dark_factor = use_dark_factor
self.w_0 = w_0
self.w_a = w_a
super(GadgetDataset, self).__init__(
filename,
dataset_type=dataset_type,
unit_system=unit_system,
n_ref=n_ref,
over_refine_factor=over_refine_factor,
kernel_name=kernel_name,
)
if self.cosmological_simulation:
self.time_unit.convert_to_units("s/h")
self.length_unit.convert_to_units("kpccm/h")
self.mass_unit.convert_to_units("g/h")
else:
self.time_unit.convert_to_units("s")
self.length_unit.convert_to_units("kpc")
self.mass_unit.convert_to_units("Msun")
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _setup_binary_spec(cls, spec, spec_dict):
if isinstance(spec, str):
_hs = ()
for hs in spec.split("+"):
_hs += spec_dict[hs]
spec = _hs
return spec
|
def _setup_binary_spec(self, spec, spec_dict):
if isinstance(spec, str):
_hs = ()
for hs in spec.split("+"):
_hs += spec_dict[hs]
spec = _hs
return spec
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _get_hvals(self):
# The entries in this header are capitalized and named to match Table 4
# in the GADGET-2 user guide.
gformat = _get_gadget_format(self.parameter_filename, self._header_size)
f = open(self.parameter_filename, "rb")
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
hvals = read_record(f, self._header_spec, endian=gformat[1])
for i in hvals:
if len(hvals[i]) == 1:
hvals[i] = hvals[i][0]
return hvals
|
def _get_hvals(self):
# The entries in this header are capitalized and named to match Table 4
# in the GADGET-2 user guide.
gformat = _get_gadget_format(self.parameter_filename)
f = open(self.parameter_filename, "rb")
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
hvals = read_record(f, self._header_spec, endian=gformat[1])
for i in hvals:
if len(hvals[i]) == 1:
hvals[i] = hvals[i][0]
return hvals
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _validate_header(filename, header_size):
"""
This method automatically detects whether the Gadget file is big/little endian
and is not corrupt/invalid using the first 4 bytes in the file. It returns a
tuple of (Valid, endianswap) where Valid is a boolean that is true if the file
is a Gadget binary file, and endianswap is the endianness character '>' or '<'.
"""
try:
f = open(filename, "rb")
except IOError:
try:
f = open(filename + ".0")
except IOError:
return False, 1
# First int32 is 256 for a Gadget2 binary file with SnapFormat=1,
# 8 for a Gadget2 binary file with SnapFormat=2 file,
# or the byte swapped equivalents.
# The int32 following the header (first 4+256 bytes) must equal this
# number.
# Note that 256 is the Gadget2 standard value, but other value could be
# set using the header_size argument.
try:
(rhead,) = struct.unpack("<I", f.read(4))
except struct.error:
f.close()
return False, 1
# Use value to check endianness
if rhead == header_size:
endianswap = "<"
elif rhead == _byte_swap_32(header_size):
endianswap = ">"
elif rhead in (8, _byte_swap_32(8)):
# This is only true for snapshot format 2
# we do not currently support double precision
# snap format 2 data
f.close()
return True, "f4"
else:
f.close()
return False, 1
# Read in particle number from header
np0 = sum(struct.unpack(endianswap + "IIIIII", f.read(6 * 4)))
# Read in size of position block. It should be 4 bytes per float,
# with 3 coordinates (x,y,z) per particle. (12 bytes per particle)
f.seek(4 + header_size + 4, 0)
np1 = struct.unpack(endianswap + "I", f.read(4))[0] / (4 * 3)
f.close()
# Compare
if np0 == np1:
return True, "f4"
elif np1 == 2 * np0:
return True, "f8"
else:
return False, 1
|
def _validate_header(filename):
"""
This method automatically detects whether the Gadget file is big/little endian
and is not corrupt/invalid using the first 4 bytes in the file. It returns a
tuple of (Valid, endianswap) where Valid is a boolean that is true if the file
is a Gadget binary file, and endianswap is the endianness character '>' or '<'.
"""
try:
f = open(filename, "rb")
except IOError:
try:
f = open(filename + ".0")
except IOError:
return False, 1
# First int32 is 256 for a Gadget2 binary file with SnapFormat=1,
# 8 for a Gadget2 binary file with SnapFormat=2 file,
# or the byte swapped equivalents (65536 and 134217728).
# The int32 following the header (first 4+256 bytes) must equal this
# number.
try:
(rhead,) = struct.unpack("<I", f.read(4))
except struct.error:
f.close()
return False, 1
# Use value to check endianness
if rhead == 256:
endianswap = "<"
elif rhead == 65536:
endianswap = ">"
elif rhead in (8, 134217728):
# This is only true for snapshot format 2
# we do not currently support double precision
# snap format 2 data
f.close()
return True, "f4"
else:
f.close()
return False, 1
# Read in particle number from header
np0 = sum(struct.unpack(endianswap + "IIIIII", f.read(6 * 4)))
# Read in size of position block. It should be 4 bytes per float,
# with 3 coordinates (x,y,z) per particle. (12 bytes per particle)
f.seek(4 + 256 + 4, 0)
np1 = struct.unpack(endianswap + "I", f.read(4))[0] / (4 * 3)
f.close()
# Compare
if np0 == np1:
return True, "f4"
elif np1 == 2 * np0:
return True, "f8"
else:
return False, 1
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _is_valid(cls, *args, **kwargs):
if "header_spec" in kwargs:
# Compute header size if header is customized
header_spec = cls._setup_binary_spec(kwargs["header_spec"], gadget_header_specs)
header_size = _compute_header_size(header_spec)
else:
header_size = 256
# First 4 bytes used to check load
return GadgetDataset._validate_header(args[0], header_size)[0]
|
def _is_valid(self, *args, **kwargs):
# First 4 bytes used to check load
return GadgetDataset._validate_header(args[0])[0]
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def __init__(self, ds, *args, **kwargs):
self._vector_fields = dict(self._vector_fields)
self._fields = ds._field_spec
self._ptypes = ds._ptype_spec
self.data_files = set([])
gformat = _get_gadget_format(ds.parameter_filename, ds._header_size)
# gadget format 1 original, 2 with block name
self._format = gformat[0]
self._endian = gformat[1]
super(IOHandlerGadgetBinary, self).__init__(ds, *args, **kwargs)
|
def __init__(self, ds, *args, **kwargs):
self._vector_fields = dict(self._vector_fields)
self._fields = ds._field_spec
self._ptypes = ds._ptype_spec
self.data_files = set([])
gformat = _get_gadget_format(ds.parameter_filename)
# gadget format 1 original, 2 with block name
self._format = gformat[0]
self._endian = gformat[1]
super(IOHandlerGadgetBinary, self).__init__(ds, *args, **kwargs)
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _initialize_index(self, data_file, regions):
DLE = data_file.ds.domain_left_edge
DRE = data_file.ds.domain_right_edge
self._float_type = data_file.ds._validate_header(
data_file.filename, data_file.ds._header_size
)[1]
if self.index_ptype == "all":
count = sum(data_file.total_particles.values())
return self._get_morton_from_position(data_file, count, 0, regions, DLE, DRE)
else:
idpos = self._ptypes.index(self.index_ptype)
count = data_file.total_particles.get(self.index_ptype)
account = [0] + [data_file.total_particles.get(ptype) for ptype in self._ptypes]
account = np.cumsum(account)
return self._get_morton_from_position(
data_file, account, account[idpos], regions, DLE, DRE
)
|
def _initialize_index(self, data_file, regions):
DLE = data_file.ds.domain_left_edge
DRE = data_file.ds.domain_right_edge
self._float_type = data_file.ds._validate_header(data_file.filename)[1]
if self.index_ptype == "all":
count = sum(data_file.total_particles.values())
return self._get_morton_from_position(data_file, count, 0, regions, DLE, DRE)
else:
idpos = self._ptypes.index(self.index_ptype)
count = data_file.total_particles.get(self.index_ptype)
account = [0] + [data_file.total_particles.get(ptype) for ptype in self._ptypes]
account = np.cumsum(account)
return self._get_morton_from_position(
data_file, account, account[idpos], regions, DLE, DRE
)
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def __init__(self, ds, io, filename, file_id):
header = ds._header
self.header = header.value
self._position_offset = header.position_offset
with header.open() as f:
self._file_size = f.seek(0, os.SEEK_END)
super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id)
|
def __init__(self, ds, io, filename, file_id):
gformat = _get_gadget_format(filename, ds._header_size)
with open(filename, "rb") as f:
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
self.header = read_record(f, ds._header_spec, endian=gformat[1])
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
self._position_offset = f.tell()
f.seek(0, os.SEEK_END)
self._file_size = f.tell()
super(GadgetBinaryFile, self).__init__(ds, io, filename, file_id)
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def __init__(
self,
filename,
dataset_type="gadget_binary",
additional_fields=(),
unit_base=None,
n_ref=64,
over_refine_factor=1,
kernel_name=None,
index_ptype="all",
bounding_box=None,
header_spec="default",
field_spec="default",
ptype_spec="default",
units_override=None,
unit_system="cgs",
use_dark_factor=False,
w_0=-1.0,
w_a=0.0,
):
if self._instantiated:
return
self._header = GadgetBinaryHeader(filename, header_spec)
header_size = self._header.size
if header_size != [256]:
only_on_root(
mylog.warn,
"Non-standard header size is detected! "
"Gadget-2 standard header is 256 bytes, but yours is %s. "
"Make sure a non-standard header is actually expected. "
"Otherwise something is wrong, "
"and you might want to check how the dataset is loaded. "
"Futher information about header specification can be found in "
"https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
header_size,
)
self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs)
self._ptype_spec = self._setup_binary_spec(ptype_spec, gadget_ptype_specs)
self.index_ptype = index_ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
unit_base["cmcm"] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
if bounding_box is not None:
bbox = np.array(bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
self.domain_left_edge = bbox[:, 0]
self.domain_right_edge = bbox[:, 1]
else:
self.domain_left_edge = self.domain_right_edge = None
if units_override is not None:
raise RuntimeError(
"units_override is not supported for GadgetDataset. "
+ "Use unit_base instead."
)
# Set dark energy parameters before cosmology object is created
self.use_dark_factor = use_dark_factor
self.w_0 = w_0
self.w_a = w_a
super(GadgetDataset, self).__init__(
filename,
dataset_type=dataset_type,
unit_system=unit_system,
n_ref=n_ref,
over_refine_factor=over_refine_factor,
kernel_name=kernel_name,
)
if self.cosmological_simulation:
self.time_unit.convert_to_units("s/h")
self.length_unit.convert_to_units("kpccm/h")
self.mass_unit.convert_to_units("g/h")
else:
self.time_unit.convert_to_units("s")
self.length_unit.convert_to_units("kpc")
self.mass_unit.convert_to_units("Msun")
|
def __init__(
self,
filename,
dataset_type="gadget_binary",
additional_fields=(),
unit_base=None,
n_ref=64,
over_refine_factor=1,
kernel_name=None,
index_ptype="all",
bounding_box=None,
header_spec="default",
field_spec="default",
ptype_spec="default",
units_override=None,
unit_system="cgs",
use_dark_factor=False,
w_0=-1.0,
w_a=0.0,
):
if self._instantiated:
return
self._header_spec = self._setup_binary_spec(header_spec, gadget_header_specs)
self._header_size = _compute_header_size(self._header_spec)
if self._header_size != 256:
only_on_root(
mylog.warn,
"Non-standard header size is detected! "
"Gadget-2 standard header is 256 bytes, but yours is %s. "
"Make sure a non-standard header is actually expected. "
"Otherwise something is wrong, "
"and you might want to check how the dataset is loaded. "
"Futher information about header specification can be found in "
"https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
self._header_size,
)
self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs)
self._ptype_spec = self._setup_binary_spec(ptype_spec, gadget_ptype_specs)
self.index_ptype = index_ptype
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
unit_base["cmcm"] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
if bounding_box is not None:
bbox = np.array(bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
self.domain_left_edge = bbox[:, 0]
self.domain_right_edge = bbox[:, 1]
else:
self.domain_left_edge = self.domain_right_edge = None
if units_override is not None:
raise RuntimeError(
"units_override is not supported for GadgetDataset. "
+ "Use unit_base instead."
)
# Set dark energy parameters before cosmology object is created
self.use_dark_factor = use_dark_factor
self.w_0 = w_0
self.w_a = w_a
super(GadgetDataset, self).__init__(
filename,
dataset_type=dataset_type,
unit_system=unit_system,
n_ref=n_ref,
over_refine_factor=over_refine_factor,
kernel_name=kernel_name,
)
if self.cosmological_simulation:
self.time_unit.convert_to_units("s/h")
self.length_unit.convert_to_units("kpccm/h")
self.mass_unit.convert_to_units("g/h")
else:
self.time_unit.convert_to_units("s")
self.length_unit.convert_to_units("kpc")
self.mass_unit.convert_to_units("Msun")
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _setup_binary_spec(cls, spec, spec_dict):
if isinstance(spec, string_types):
_hs = ()
for hs in spec.split("+"):
_hs += spec_dict[hs]
spec = _hs
return spec
|
def _setup_binary_spec(cls, spec, spec_dict):
if isinstance(spec, str):
_hs = ()
for hs in spec.split("+"):
_hs += spec_dict[hs]
spec = _hs
return spec
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _get_hvals(self):
return self._header.value
|
def _get_hvals(self):
# The entries in this header are capitalized and named to match Table 4
# in the GADGET-2 user guide.
gformat = _get_gadget_format(self.parameter_filename, self._header_size)
f = open(self.parameter_filename, "rb")
if gformat[0] == 2:
f.seek(f.tell() + SNAP_FORMAT_2_OFFSET)
hvals = read_record(f, self._header_spec, endian=gformat[1])
for i in hvals:
if len(hvals[i]) == 1:
hvals[i] = hvals[i][0]
return hvals
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
def _is_valid(cls, *args, **kwargs):
if "header_spec" in kwargs:
header_spec = kwargs["header_spec"]
else:
header_spec = "default"
header = GadgetBinaryHeader(args[0], header_spec)
return header.validate()
|
def _is_valid(cls, *args, **kwargs):
if "header_spec" in kwargs:
# Compute header size if header is customized
header_spec = cls._setup_binary_spec(kwargs["header_spec"], gadget_header_specs)
header_size = _compute_header_size(header_spec)
else:
header_size = 256
# First 4 bytes used to check load
return GadgetDataset._validate_header(args[0], header_size)[0]
|
https://github.com/yt-project/yt/issues/1846
|
Traceback (most recent call last):
File "./plot_solution.py", line 519, in <module>
pf.add_field(("gas", "density_squared"), function=_density_squared, units="g**2/cm**6")
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 1191, in add_field
self.index
File "/usr/local/lib/python2.7/dist-packages/yt/data_objects/static_output.py", line 504, in index
self, dataset_type=self.dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 39, in __init__
super(ParticleIndex, self).__init__(ds, dataset_type)
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/geometry_handler.py", line 50, in __init__
self._setup_geometry()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 50, in _setup_geometry
self._initialize_particle_handler()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 104, in _initialize_particle_handler
self._initialize_indices()
File "/usr/local/lib/python2.7/dist-packages/yt/geometry/particle_geometry_handler.py", line 134, in _initialize_indices
self.io._initialize_index(data_file, self.regions)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 351, in _initialize_index
data_file, count, 0, regions, DLE, DRE)
File "/usr/local/lib/python2.7/dist-packages/yt/frontends/gadget/io.py", line 334, in _get_morton_from_position
pp = np.fromfile(f, dtype=self._endian + self._float_type,
TypeError: cannot concatenate 'str' and 'int' objects
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.