after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _setup_emitter(general_config):
# Banner
emitter = general_config.emitter
emitter.banner(StakeHolder.banner)
return emitter
|
def _setup_emitter(general_config):
# Banner
emitter = general_config.emitter
emitter.clear()
emitter.banner(StakeHolder.banner)
return emitter
|
https://github.com/nucypher/nucypher/issues/1721
|
Traceback (most recent call last):
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/commands/worklock.py", line 139, in bid
receipt = bidder.place_bid(value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/actors.py", line 1557, in place_bid
receipt = self.worklock_agent.bid(checksum_address=self.checksum_address, value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/agents.py", line 993, in bid
payload={'value': value})
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 489, in send_transaction
confirmations=confirmations)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 408, in sign_and_broadcast_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def _setup_emitter(general_config):
emitter = general_config.emitter
emitter.banner(WORKLOCK_BANNER)
return emitter
|
def _setup_emitter(general_config):
emitter = general_config.emitter
emitter.clear()
emitter.banner(WORKLOCK_BANNER)
return emitter
|
https://github.com/nucypher/nucypher/issues/1721
|
Traceback (most recent call last):
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/commands/worklock.py", line 139, in bid
receipt = bidder.place_bid(value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/actors.py", line 1557, in place_bid
receipt = self.worklock_agent.bid(checksum_address=self.checksum_address, value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/agents.py", line 993, in bid
payload={'value': value})
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 489, in send_transaction
confirmations=confirmations)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 408, in sign_and_broadcast_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def __create_bidder(self, registry, transacting: bool = False, hw_wallet: bool = False):
client_password = None
if transacting and not hw_wallet:
client_password = get_client_password(checksum_address=self.bidder_address)
bidder = Bidder(
checksum_address=self.bidder_address,
registry=registry,
client_password=client_password,
is_transacting=transacting,
)
return bidder
|
def __create_bidder(self, registry, transacting: bool = False, hw_wallet: bool = False):
client_password = None
if transacting and not hw_wallet:
client_password = get_client_password(checksum_address=self.bidder_address)
bidder = Bidder(
checksum_address=self.bidder_address,
registry=registry,
client_password=client_password,
)
return bidder
|
https://github.com/nucypher/nucypher/issues/1721
|
Traceback (most recent call last):
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/commands/worklock.py", line 139, in bid
receipt = bidder.place_bid(value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/actors.py", line 1557, in place_bid
receipt = self.worklock_agent.bid(checksum_address=self.checksum_address, value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/agents.py", line 993, in bid
payload={'value': value})
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 489, in send_transaction
confirmations=confirmations)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 408, in sign_and_broadcast_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def make_cli_character(
character_config,
click_config,
unlock_keyring: bool = True,
teacher_uri: str = None,
min_stake: int = 0,
load_preferred_teachers: bool = True,
**config_args,
):
emitter = click_config.emitter
#
# Pre-Init
#
# Handle Keyring
if unlock_keyring:
character_config.attach_keyring()
unlock_nucypher_keyring(
emitter,
character_configuration=character_config,
password=get_nucypher_password(confirm=False),
)
# Handle Teachers
teacher_nodes = list()
if load_preferred_teachers:
teacher_nodes = load_seednodes(
emitter,
teacher_uris=[teacher_uri] if teacher_uri else None,
min_stake=min_stake,
federated_only=character_config.federated_only,
network_domains=character_config.domains,
network_middleware=character_config.network_middleware,
registry=character_config.registry,
)
#
# Character Init
#
# Produce Character
try:
CHARACTER = character_config(
known_nodes=teacher_nodes,
network_middleware=character_config.network_middleware,
**config_args,
)
except CryptoError:
raise character_config.keyring.AuthenticationFailed(
"Failed to unlock keyring. Are you sure you provided the correct password?"
)
#
# Post-Init
#
if CHARACTER.controller is not NO_CONTROL_PROTOCOL:
CHARACTER.controller.emitter = (
emitter # TODO: set it on object creation? Or not set at all?
)
# Federated
if character_config.federated_only:
emitter.message("WARNING: Running in Federated mode", color="yellow")
return CHARACTER
|
def make_cli_character(
character_config,
click_config,
dev: bool = False,
teacher_uri: str = None,
min_stake: int = 0,
**config_args,
):
emitter = click_config.emitter
#
# Pre-Init
#
# Handle Keyring
if not dev:
character_config.attach_keyring()
unlock_nucypher_keyring(
emitter,
character_configuration=character_config,
password=get_nucypher_password(confirm=False),
)
# Handle Teachers
teacher_nodes = load_seednodes(
emitter,
teacher_uris=[teacher_uri] if teacher_uri else None,
min_stake=min_stake,
federated_only=character_config.federated_only,
network_domains=character_config.domains,
network_middleware=character_config.network_middleware,
registry=character_config.registry,
)
#
# Character Init
#
# Produce Character
try:
CHARACTER = character_config(
known_nodes=teacher_nodes,
network_middleware=character_config.network_middleware,
**config_args,
)
except CryptoError:
raise character_config.keyring.AuthenticationFailed(
"Failed to unlock keyring. Are you sure you provided the correct password?"
)
#
# Post-Init
#
if CHARACTER.controller is not NO_CONTROL_PROTOCOL:
CHARACTER.controller.emitter = (
emitter # TODO: set it on object creation? Or not set at all?
)
# Federated
if character_config.federated_only:
emitter.message("WARNING: Running in Federated mode", color="yellow")
return CHARACTER
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def public_keys(
click_config,
# API Options
geth,
provider_uri,
federated_only,
dev,
pay_with,
network,
registry_filepath,
config_file,
discovery_port,
hw_wallet,
teacher_uri,
min_stake,
):
"""
Obtain Alice's public verification and encryption keys.
"""
### Setup ###
emitter = _setup_emitter(click_config)
alice_config, provider_uri = _get_alice_config(
click_config,
config_file,
dev,
discovery_port,
federated_only,
geth,
network,
pay_with,
provider_uri,
registry_filepath,
)
#############
ALICE = _create_alice(
alice_config,
click_config,
dev,
emitter,
hw_wallet,
teacher_uri,
min_stake,
load_seednodes=False,
)
response = ALICE.controller.public_keys()
return response
|
def public_keys(
click_config,
# API Options
geth,
provider_uri,
federated_only,
dev,
pay_with,
network,
registry_filepath,
config_file,
discovery_port,
hw_wallet,
teacher_uri,
min_stake,
):
"""
Obtain Alice's public verification and encryption keys.
"""
### Setup ###
emitter = _setup_emitter(click_config)
alice_config, provider_uri = _get_alice_config(
click_config,
config_file,
dev,
discovery_port,
federated_only,
geth,
network,
pay_with,
provider_uri,
registry_filepath,
)
#############
ALICE = _create_alice(
alice_config, click_config, dev, emitter, hw_wallet, teacher_uri, min_stake
)
response = ALICE.controller.public_keys()
return response
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def derive_policy_pubkey(
click_config,
# Other (required)
label,
# API Options
geth,
provider_uri,
federated_only,
dev,
pay_with,
network,
registry_filepath,
config_file,
discovery_port,
hw_wallet,
teacher_uri,
min_stake,
):
"""
Get a policy public key from a policy label.
"""
### Setup ###
emitter = _setup_emitter(click_config)
alice_config, provider_uri = _get_alice_config(
click_config,
config_file,
dev,
discovery_port,
federated_only,
geth,
network,
pay_with,
provider_uri,
registry_filepath,
)
#############
ALICE = _create_alice(
alice_config,
click_config,
dev,
emitter,
hw_wallet,
teacher_uri,
min_stake,
load_seednodes=False,
)
# Request
return ALICE.controller.derive_policy_encrypting_key(label=label)
|
def derive_policy_pubkey(
click_config,
# Other (required)
label,
# API Options
geth,
provider_uri,
federated_only,
dev,
pay_with,
network,
registry_filepath,
config_file,
discovery_port,
hw_wallet,
teacher_uri,
min_stake,
):
"""
Get a policy public key from a policy label.
"""
### Setup ###
emitter = _setup_emitter(click_config)
alice_config, provider_uri = _get_alice_config(
click_config,
config_file,
dev,
discovery_port,
federated_only,
geth,
network,
pay_with,
provider_uri,
registry_filepath,
)
#############
ALICE = _create_alice(
alice_config, click_config, dev, emitter, hw_wallet, teacher_uri, min_stake
)
# Request
return ALICE.controller.derive_policy_encrypting_key(label=label)
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def decrypt(
click_config,
# Other (required)
label,
message_kit,
# API Options
geth,
provider_uri,
federated_only,
dev,
pay_with,
network,
registry_filepath,
config_file,
discovery_port,
hw_wallet,
teacher_uri,
min_stake,
):
"""
Decrypt data encrypted under an Alice's policy public key.
"""
### Setup ###
emitter = _setup_emitter(click_config)
alice_config, provider_uri = _get_alice_config(
click_config,
config_file,
dev,
discovery_port,
federated_only,
geth,
network,
pay_with,
provider_uri,
registry_filepath,
)
#############
ALICE = _create_alice(
alice_config,
click_config,
dev,
emitter,
hw_wallet,
teacher_uri,
min_stake,
load_seednodes=False,
)
# Request
request_data = {"label": label, "message_kit": message_kit}
response = ALICE.controller.decrypt(request=request_data)
return response
|
def decrypt(
click_config,
# Other (required)
label,
message_kit,
# API Options
geth,
provider_uri,
federated_only,
dev,
pay_with,
network,
registry_filepath,
config_file,
discovery_port,
hw_wallet,
teacher_uri,
min_stake,
):
"""
Decrypt data encrypted under an Alice's policy public key.
"""
### Setup ###
emitter = _setup_emitter(click_config)
alice_config, provider_uri = _get_alice_config(
click_config,
config_file,
dev,
discovery_port,
federated_only,
geth,
network,
pay_with,
provider_uri,
registry_filepath,
)
#############
ALICE = _create_alice(
alice_config, click_config, dev, emitter, hw_wallet, teacher_uri, min_stake
)
# Request
request_data = {"label": label, "message_kit": message_kit}
response = ALICE.controller.decrypt(request=request_data)
return response
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def _create_alice(
alice_config,
click_config,
dev,
emitter,
hw_wallet,
teacher_uri,
min_stake,
load_seednodes=True,
):
#
# Produce Alice
#
client_password = None
if not alice_config.federated_only:
if (not hw_wallet or not dev) and not click_config.json_ipc:
client_password = get_client_password(
checksum_address=alice_config.checksum_address
)
try:
ALICE = actions.make_cli_character(
character_config=alice_config,
click_config=click_config,
unlock_keyring=not dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
client_password=client_password,
load_preferred_teachers=load_seednodes,
start_learning_now=load_seednodes,
)
return ALICE
except NucypherKeyring.AuthenticationFailed as e:
emitter.echo(str(e), color="red", bold=True)
click.get_current_context().exit(1)
|
def _create_alice(
alice_config, click_config, dev, emitter, hw_wallet, teacher_uri, min_stake
):
#
# Produce Alice
#
client_password = None
if not alice_config.federated_only:
if (not hw_wallet or not dev) and not click_config.json_ipc:
client_password = get_client_password(
checksum_address=alice_config.checksum_address
)
try:
ALICE = actions.make_cli_character(
character_config=alice_config,
click_config=click_config,
dev=dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
client_password=client_password,
)
return ALICE
except NucypherKeyring.AuthenticationFailed as e:
emitter.echo(str(e), color="red", bold=True)
click.get_current_context().exit(1)
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def run(
click_config,
# API Options
provider_uri,
network,
registry_filepath,
checksum_address,
dev,
config_file,
discovery_port,
teacher_uri,
min_stake,
# Other
controller_port,
dry_run,
):
"""
Start Bob's controller.
"""
### Setup ###
emitter = _setup_emitter(click_config)
bob_config = _get_bob_config(
click_config,
dev,
provider_uri,
network,
registry_filepath,
checksum_address,
config_file,
discovery_port,
)
#############
BOB = actions.make_cli_character(
character_config=bob_config,
click_config=click_config,
unlock_keyring=not dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
)
# RPC
if click_config.json_ipc:
rpc_controller = BOB.make_rpc_controller()
_transport = rpc_controller.make_control_transport()
rpc_controller.start()
return
# Echo Public Keys
emitter.message(
f"Bob Verifying Key {bytes(BOB.stamp).hex()}", color="green", bold=True
)
bob_encrypting_key = bytes(BOB.public_keys(DecryptingPower)).hex()
emitter.message(f"Bob Encrypting Key {bob_encrypting_key}", color="blue", bold=True)
# Start Controller
controller = BOB.make_web_controller(crash_on_error=click_config.debug)
BOB.log.info("Starting HTTP Character Web Controller")
return controller.start(http_port=controller_port, dry_run=dry_run)
|
def run(
click_config,
# API Options
provider_uri,
network,
registry_filepath,
checksum_address,
dev,
config_file,
discovery_port,
teacher_uri,
min_stake,
# Other
controller_port,
dry_run,
):
"""
Start Bob's controller.
"""
### Setup ###
emitter = _setup_emitter(click_config)
bob_config = _get_bob_config(
click_config,
dev,
provider_uri,
network,
registry_filepath,
checksum_address,
config_file,
discovery_port,
)
#############
BOB = actions.make_cli_character(
character_config=bob_config,
click_config=click_config,
dev=dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
)
# RPC
if click_config.json_ipc:
rpc_controller = BOB.make_rpc_controller()
_transport = rpc_controller.make_control_transport()
rpc_controller.start()
return
# Echo Public Keys
emitter.message(
f"Bob Verifying Key {bytes(BOB.stamp).hex()}", color="green", bold=True
)
bob_encrypting_key = bytes(BOB.public_keys(DecryptingPower)).hex()
emitter.message(f"Bob Encrypting Key {bob_encrypting_key}", color="blue", bold=True)
# Start Controller
controller = BOB.make_web_controller(crash_on_error=click_config.debug)
BOB.log.info("Starting HTTP Character Web Controller")
return controller.start(http_port=controller_port, dry_run=dry_run)
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def public_keys(
click_config,
# API Options
provider_uri,
network,
registry_filepath,
checksum_address,
dev,
config_file,
discovery_port,
teacher_uri,
min_stake,
):
"""
Obtain Bob's public verification and encryption keys.
"""
### Setup ###
_setup_emitter(click_config)
bob_config = _get_bob_config(
click_config,
dev,
provider_uri,
network,
registry_filepath,
checksum_address,
config_file,
discovery_port,
)
#############
BOB = actions.make_cli_character(
character_config=bob_config,
click_config=click_config,
unlock_keyring=not dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
load_preferred_teachers=False,
start_learning_now=False,
)
response = BOB.controller.public_keys()
return response
|
def public_keys(
click_config,
# API Options
provider_uri,
network,
registry_filepath,
checksum_address,
dev,
config_file,
discovery_port,
teacher_uri,
min_stake,
):
"""
Obtain Bob's public verification and encryption keys.
"""
### Setup ###
_setup_emitter(click_config)
bob_config = _get_bob_config(
click_config,
dev,
provider_uri,
network,
registry_filepath,
checksum_address,
config_file,
discovery_port,
)
#############
BOB = actions.make_cli_character(
character_config=bob_config,
click_config=click_config,
dev=dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
)
response = BOB.controller.public_keys()
return response
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def retrieve(
click_config,
# API Options
provider_uri,
network,
registry_filepath,
checksum_address,
dev,
config_file,
discovery_port,
teacher_uri,
min_stake,
# Other
label,
policy_encrypting_key,
alice_verifying_key,
message_kit,
):
"""
Obtain plaintext from encrypted data, if access was granted.
"""
### Setup ###
_setup_emitter(click_config)
bob_config = _get_bob_config(
click_config,
dev,
provider_uri,
network,
registry_filepath,
checksum_address,
config_file,
discovery_port,
)
#############
BOB = actions.make_cli_character(
character_config=bob_config,
click_config=click_config,
unlock_keyring=not dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
)
# Validate
if not all((label, policy_encrypting_key, alice_verifying_key, message_kit)):
input_specification, output_specification = BOB.control.get_specifications(
interface_name="retrieve"
)
required_fields = ", ".join(input_specification)
raise click.BadArgumentUsage(
f"{required_fields} are required flags to retrieve"
)
# Request
bob_request_data = {
"label": label,
"policy_encrypting_key": policy_encrypting_key,
"alice_verifying_key": alice_verifying_key,
"message_kit": message_kit,
}
response = BOB.controller.retrieve(request=bob_request_data)
return response
|
def retrieve(
click_config,
# API Options
provider_uri,
network,
registry_filepath,
checksum_address,
dev,
config_file,
discovery_port,
teacher_uri,
min_stake,
# Other
label,
policy_encrypting_key,
alice_verifying_key,
message_kit,
):
"""
Obtain plaintext from encrypted data, if access was granted.
"""
### Setup ###
_setup_emitter(click_config)
bob_config = _get_bob_config(
click_config,
dev,
provider_uri,
network,
registry_filepath,
checksum_address,
config_file,
discovery_port,
)
#############
BOB = actions.make_cli_character(
character_config=bob_config,
click_config=click_config,
dev=dev,
teacher_uri=teacher_uri,
min_stake=min_stake,
)
# Validate
if not all((label, policy_encrypting_key, alice_verifying_key, message_kit)):
input_specification, output_specification = BOB.control.get_specifications(
interface_name="retrieve"
)
required_fields = ", ".join(input_specification)
raise click.BadArgumentUsage(
f"{required_fields} are required flags to retrieve"
)
# Request
bob_request_data = {
"label": label,
"policy_encrypting_key": policy_encrypting_key,
"alice_verifying_key": alice_verifying_key,
"message_kit": message_kit,
}
response = BOB.controller.retrieve(request=bob_request_data)
return response
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def save_metadata(
click_config,
# API Options
geth,
provider_uri,
network,
registry_filepath,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
config_file,
dev,
lonely,
teacher_uri,
min_stake,
):
"""
Manually write node metadata to disk without running.
"""
### Setup ###
_validate_args(geth, federated_only, staker_address, registry_filepath)
emitter = _setup_emitter(click_config, worker_address)
_pre_launch_warnings(emitter, dev=dev, force=None)
ursula_config, provider_uri = _get_ursula_config(
emitter,
geth,
provider_uri,
network,
registry_filepath,
dev,
config_file,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
)
#############
URSULA = _create_ursula(
ursula_config,
click_config,
dev,
emitter,
lonely,
teacher_uri,
min_stake,
load_seednodes=False,
)
metadata_path = URSULA.write_node_metadata(node=URSULA)
emitter.message(
f"Successfully saved node metadata to {metadata_path}.", color="green"
)
|
def save_metadata(
click_config,
# API Options
geth,
provider_uri,
network,
registry_filepath,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
config_file,
dev,
lonely,
teacher_uri,
min_stake,
):
"""
Manually write node metadata to disk without running.
"""
### Setup ###
_validate_args(geth, federated_only, staker_address, registry_filepath)
emitter = _setup_emitter(click_config, worker_address)
_pre_launch_warnings(emitter, dev=dev, force=None)
ursula_config, provider_uri = _get_ursula_config(
emitter,
geth,
provider_uri,
network,
registry_filepath,
dev,
config_file,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
)
#############
URSULA = _create_ursula(
ursula_config, click_config, dev, emitter, lonely, teacher_uri, min_stake
)
metadata_path = URSULA.write_node_metadata(node=URSULA)
emitter.message(
f"Successfully saved node metadata to {metadata_path}.", color="green"
)
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def confirm_activity(
click_config,
# API Options
geth,
provider_uri,
network,
registry_filepath,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
config_file,
dev,
lonely,
teacher_uri,
min_stake,
):
"""
Manually confirm-activity for the current period.
"""
### Setup ###
_validate_args(geth, federated_only, staker_address, registry_filepath)
emitter = _setup_emitter(click_config, worker_address)
_pre_launch_warnings(emitter, dev=dev, force=None)
ursula_config, provider_uri = _get_ursula_config(
emitter,
geth,
provider_uri,
network,
registry_filepath,
dev,
config_file,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
)
#############
URSULA = _create_ursula(
ursula_config,
click_config,
dev,
emitter,
lonely,
teacher_uri,
min_stake,
load_seednodes=False,
)
confirmed_period = URSULA.staking_agent.get_current_period() + 1
click.echo(f"Confirming activity for period {confirmed_period}", color="blue")
receipt = URSULA.confirm_activity()
economics = TokenEconomicsFactory.get_economics(registry=URSULA.registry)
date = datetime_at_period(
period=confirmed_period, seconds_per_period=economics.seconds_per_period
)
# TODO: Double-check dates here
emitter.echo(
f"\nActivity confirmed for period #{confirmed_period} (starting at {date})",
bold=True,
color="blue",
)
painting.paint_receipt_summary(
emitter=emitter,
receipt=receipt,
chain_name=URSULA.staking_agent.blockchain.client.chain_name,
)
|
def confirm_activity(
click_config,
# API Options
geth,
provider_uri,
network,
registry_filepath,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
config_file,
dev,
lonely,
teacher_uri,
min_stake,
):
"""
Manually confirm-activity for the current period.
"""
### Setup ###
_validate_args(geth, federated_only, staker_address, registry_filepath)
emitter = _setup_emitter(click_config, worker_address)
_pre_launch_warnings(emitter, dev=dev, force=None)
ursula_config, provider_uri = _get_ursula_config(
emitter,
geth,
provider_uri,
network,
registry_filepath,
dev,
config_file,
staker_address,
worker_address,
federated_only,
rest_host,
rest_port,
db_filepath,
poa,
light,
)
#############
URSULA = _create_ursula(
ursula_config, click_config, dev, emitter, lonely, teacher_uri, min_stake
)
receipt = URSULA.confirm_activity()
confirmed_period = URSULA.staking_agent.get_current_period() + 1
date = datetime_at_period(
period=confirmed_period, seconds_per_period=URSULA.economics.seconds_per_period
)
# TODO: Double-check dates here
emitter.echo(
f"\nActivity confirmed for period #{confirmed_period} (starting at {date})",
bold=True,
color="blue",
)
painting.paint_receipt_summary(
emitter=emitter,
receipt=receipt,
chain_name=URSULA.staking_agent.blockchain.client.chain_name,
)
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def _create_ursula(
ursula_config,
click_config,
dev,
emitter,
lonely,
teacher_uri,
min_stake,
load_seednodes=True,
):
#
# Make Ursula
#
client_password = None
if not ursula_config.federated_only:
if not dev and not click_config.json_ipc:
client_password = get_client_password(
checksum_address=ursula_config.worker_address,
envvar="NUCYPHER_WORKER_ETH_PASSWORD",
)
try:
URSULA = actions.make_cli_character(
character_config=ursula_config,
click_config=click_config,
min_stake=min_stake,
teacher_uri=teacher_uri,
unlock_keyring=not dev,
lonely=lonely,
client_password=client_password,
load_preferred_teachers=load_seednodes,
start_learning_now=load_seednodes,
)
return URSULA
except NucypherKeyring.AuthenticationFailed as e:
emitter.echo(str(e), color="red", bold=True)
# TODO: Exit codes (not only for this, but for other exceptions)
return click.get_current_context().exit(1)
|
def _create_ursula(
ursula_config, click_config, dev, emitter, lonely, teacher_uri, min_stake
):
#
# Make Ursula
#
client_password = None
if not ursula_config.federated_only:
if not dev and not click_config.json_ipc:
client_password = get_client_password(
checksum_address=ursula_config.worker_address,
envvar="NUCYPHER_WORKER_ETH_PASSWORD",
)
try:
URSULA = actions.make_cli_character(
character_config=ursula_config,
click_config=click_config,
min_stake=min_stake,
teacher_uri=teacher_uri,
dev=dev,
lonely=lonely,
client_password=client_password,
)
return URSULA
except NucypherKeyring.AuthenticationFailed as e:
emitter.echo(str(e), color="red", bold=True)
# TODO: Exit codes (not only for this, but for other exceptions)
return click.get_current_context().exit(1)
|
https://github.com/nucypher/nucypher/issues/1233
|
(nucypher) derek@derek-home-server:~/nucypher/forks/david/nucypher$ nucypher ursula run --teacher <teacher_uri> --interactive
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Enter password to unlock account 0x4a0A9e421B624410DAf3AF42707F7c5973a2C963:
Enter NuCypher keyring password:
Decrypting NuCypher keyring...
Starting Ursula on <my_ip>:9151
Connecting to goerli
Working ~ Keep Ursula Online!
Attached 0xf6B42791C096C94F92AA0938fA13C93B0eD1EB2A@<my_ip>:9151
⛰ ⚘ | Ivory Mountain DarkOrchid Flower
Type 'help' or '?' for help
Ursula(0xf6B4279) >>> confirm_activity
Unhandled Error
Traceback (most recent call last):
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
why = selectable.doRead()
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 291, in doRead
return fdesc.readFromFD(self.fd, self.dataReceived)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/fdesc.py", line 94, in readFromFD
callback(output)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/process.py", line 295, in dataReceived
self.proc.childDataReceived(self.name, data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/internet/_posixstdio.py", line 77, in childDataReceived
self.protocol.dataReceived(data)
File "/home/derek/.local/share/virtualenvs/nucypher-yyLOaW15/lib/python3.6/site-packages/twisted/protocols/basic.py", line 572, in dataReceived
why = self.lineReceived(line)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 148, in lineReceived
self.__commands[line]()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/cli/processes.py", line 183, in confirm_activity
return self.ursula.confirm_activity()
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 70, in wrapped
return func(actor, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 77, in wrapped
receipt = actor_method(self, *args, **kwargs)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/actors.py", line 662, in confirm_activity
receipt = self.staking_agent.confirm_activity(worker_address=self.__worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/agents.py", line 279, in confirm_activity
receipt = self.blockchain.send_transaction(contract_function=contract_function, sender_address=worker_address)
File "/home/derek/nucypher/forks/david/nucypher/nucypher/blockchain/eth/interfaces.py", line 353, in send_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
def set_worker(
click_config,
# Worker Options
poa,
light,
registry_filepath,
config_file,
provider_uri,
staking_address,
hw_wallet,
beneficiary_address,
allocation_filepath,
worker_address,
# Other options
force,
):
"""
Bond a worker to a staker.
"""
### Setup ###
emitter = _setup_emitter(click_config)
STAKEHOLDER, blockchain = _create_stakeholder(
config_file,
provider_uri,
poa,
light,
registry_filepath,
staking_address,
beneficiary_address=beneficiary_address,
allocation_filepath=allocation_filepath,
)
#############
economics = STAKEHOLDER.economics
client_account, staking_address = handle_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
if not worker_address:
worker_address = click.prompt(
"Enter worker address", type=EIP55_CHECKSUM_ADDRESS
)
# TODO: Check preconditions (e.g., minWorkerPeriods, already in use, etc)
password = None
if not hw_wallet and not blockchain.client.is_local:
password = get_client_password(checksum_address=client_account)
# TODO: Double-check dates
# Calculate release datetime
current_period = STAKEHOLDER.staking_agent.get_current_period()
bonded_date = datetime_at_period(
period=current_period, seconds_per_period=economics.seconds_per_period
)
min_worker_periods = STAKEHOLDER.economics.minimum_worker_periods
release_period = current_period + min_worker_periods
release_date = datetime_at_period(
period=release_period,
seconds_per_period=economics.seconds_per_period,
start_of_period=True,
)
click.confirm(
f"Commit to bonding "
f"worker {worker_address} to staker {client_account} "
f"for a minimum of {STAKEHOLDER.economics.minimum_worker_periods} periods?",
abort=True,
)
STAKEHOLDER.assimilate(checksum_address=client_account, password=password)
receipt = STAKEHOLDER.set_worker(worker_address=worker_address)
# Report Success
emitter.echo(
f"\nWorker {worker_address} successfully bonded to staker {staking_address}",
color="green",
)
paint_receipt_summary(
emitter=emitter,
receipt=receipt,
chain_name=blockchain.client.chain_name,
transaction_type="set_worker",
)
emitter.echo(f"Bonded at period #{current_period} ({bonded_date})", color="green")
emitter.echo(
f"This worker can be replaced or detached after period "
f"#{release_period} ({release_date})",
color="green",
)
|
def set_worker(
click_config,
# Worker Options
poa,
light,
registry_filepath,
config_file,
provider_uri,
staking_address,
hw_wallet,
beneficiary_address,
allocation_filepath,
worker_address,
# Other options
force,
):
"""
Bond a worker to a staker.
"""
### Setup ###
emitter = _setup_emitter(click_config)
STAKEHOLDER, blockchain = _create_stakeholder(
config_file,
provider_uri,
poa,
light,
registry_filepath,
staking_address,
beneficiary_address=beneficiary_address,
allocation_filepath=allocation_filepath,
)
#############
economics = STAKEHOLDER.economics
client_account, staking_address = handle_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
if not worker_address:
worker_address = click.prompt(
"Enter worker address", type=EIP55_CHECKSUM_ADDRESS
)
# TODO: Check preconditions (e.g., minWorkerPeriods, already in use, etc)
password = None
if not hw_wallet and not blockchain.client.is_local:
password = get_client_password(checksum_address=client_account)
STAKEHOLDER.assimilate(checksum_address=client_account, password=password)
receipt = STAKEHOLDER.set_worker(worker_address=worker_address)
# TODO: Double-check dates
current_period = STAKEHOLDER.staking_agent.get_current_period()
bonded_date = datetime_at_period(
period=current_period, seconds_per_period=economics.seconds_per_period
)
min_worker_periods = STAKEHOLDER.staking_agent.staking_parameters()[7]
release_period = current_period + min_worker_periods
release_date = datetime_at_period(
period=release_period,
seconds_per_period=economics.seconds_per_period,
start_of_period=True,
)
emitter.echo(
f"\nWorker {worker_address} successfully bonded to staker {staking_address}",
color="green",
)
paint_receipt_summary(
emitter=emitter,
receipt=receipt,
chain_name=blockchain.client.chain_name,
transaction_type="set_worker",
)
emitter.echo(f"Bonded at period #{current_period} ({bonded_date})", color="green")
emitter.echo(
f"This worker can be replaced or detached after period "
f"#{release_period} ({release_date})",
color="green",
)
|
https://github.com/nucypher/nucypher/issues/1375
|
____ __ __
/\ _`\ /\ \__ /\ \
\ \,\L\_\ \ ,_\ __ \ \ \/'\ __ _ __
\/_\__ \\ \ \/ /'__`\\ \ , < /'__`\/\`'__\
/\ \L\ \ \ \_/\ \L\.\\ \ \\`\ /\ __/\ \ \/
\ `\____\ \__\ \__/.\_\ \_\ \_\ \____\\ \_\
\/_____/\/__/\/__/\/_/\/_/\/_/\/____/ \/_/
The Holder of Stakes.
======================================= Active Stakes =========================================
| ~ | Staker | Worker | # | Value | Duration | Enactment
| | ------ | ------ | - | -------- | ------------ | -----------------------------------------
| 0 | 0xFD3D | 0x1368 | 0 | 15000 NU | 45 periods . | Aug 06 21:00:30 PDT - Sep 20 21:00:30 PDT
| 1 | 0xFD3D | 0x1368 | 1 | 100000 NU | 300 periods | Oct 01 21:00:30 PDT - Jul 27 21:00:30 PDT
| 2 | 0x9CA6 | 0x3d51 | 0 | 175864.607570726413344034 NU | 33 periods . | Aug 25 21:00:30 PDT - Sep 27 21:00:30 PDT
| 3 | 0x9CA6 | 0x3d51 | 1 | 200000 NU | 365 periods | Oct 01 21:00:30 PDT - Sep 30 21:00:30 PDT
| 4 | 0x26FD | 0x4801 | 0 | 50000 NU | 60 periods . | Sep 01 21:00:30 PDT - Oct 31 21:00:30 PDT
Select Stake: 4
Enter target value (must be less than or equal to 50000 NU): 40000
Enter number of periods to extend: 250
Traceback (most recent call last):
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/config.py", line 217, in wrapper
return func(config, *args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/characters/stake.py", line 365, in stake
extension=extension)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/painting.py", line 406, in paint_staged_stake_division
new_end_period = original_stake.end_period + extension
AttributeError: 'Stake' object has no attribute 'end_period'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
AttributeError
|
def create(
click_config,
# Stake Options
poa,
light,
registry_filepath,
config_file,
provider_uri,
staking_address,
hw_wallet,
beneficiary_address,
allocation_filepath,
# Other
force,
value,
lock_periods,
):
"""
Initialize a new stake.
"""
### Setup ###
emitter = _setup_emitter(click_config)
STAKEHOLDER, blockchain = _create_stakeholder(
config_file,
provider_uri,
poa,
light,
registry_filepath,
staking_address,
beneficiary_address=beneficiary_address,
allocation_filepath=allocation_filepath,
)
#############
economics = STAKEHOLDER.economics
client_account, staking_address = handle_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
# Dynamic click types (Economics)
min_locked = economics.minimum_allowed_locked
stake_value_range = click.FloatRange(
min=NU.from_nunits(min_locked).to_tokens(), clamp=False
)
stake_duration_range = click.IntRange(
min=economics.minimum_locked_periods, clamp=False
)
password = None
if not hw_wallet and not blockchain.client.is_local:
password = get_client_password(checksum_address=client_account)
#
# Stage Stake
#
if not value:
value = click.prompt(
f"Enter stake value in NU "
f"({NU.from_nunits(STAKEHOLDER.economics.minimum_allowed_locked)} - "
f"{NU.from_nunits(STAKEHOLDER.economics.maximum_allowed_locked)})",
type=stake_value_range,
default=NU.from_nunits(min_locked).to_tokens(),
)
value = NU.from_tokens(value)
if not lock_periods:
prompt = (
f"Enter stake duration ({STAKEHOLDER.economics.minimum_locked_periods} - "
f"{STAKEHOLDER.economics.maximum_rewarded_periods})"
)
lock_periods = click.prompt(prompt, type=stake_duration_range)
start_period = STAKEHOLDER.staking_agent.get_current_period() + 1
unlock_period = start_period + lock_periods
#
# ReviewPub
#
if not force:
painting.paint_staged_stake(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
stake_value=value,
lock_periods=lock_periods,
start_period=start_period,
unlock_period=unlock_period,
)
confirm_staged_stake(
staker_address=staking_address, value=value, lock_periods=lock_periods
)
# Last chance to bail
click.confirm("Publish staged stake to the blockchain?", abort=True)
# Execute
STAKEHOLDER.assimilate(checksum_address=client_account, password=password)
new_stake = STAKEHOLDER.initialize_stake(amount=value, lock_periods=lock_periods)
painting.paint_staking_confirmation(
emitter=emitter, ursula=STAKEHOLDER, transactions=new_stake.transactions
)
|
def create(
click_config,
# Stake Options
poa,
light,
registry_filepath,
config_file,
provider_uri,
staking_address,
hw_wallet,
beneficiary_address,
allocation_filepath,
# Other
force,
value,
lock_periods,
):
"""
Initialize a new stake.
"""
### Setup ###
emitter = _setup_emitter(click_config)
STAKEHOLDER, blockchain = _create_stakeholder(
config_file,
provider_uri,
poa,
light,
registry_filepath,
staking_address,
beneficiary_address=beneficiary_address,
allocation_filepath=allocation_filepath,
)
#############
economics = STAKEHOLDER.economics
client_account, staking_address = handle_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
# Dynamic click types (Economics)
min_locked = economics.minimum_allowed_locked
stake_value_range = click.FloatRange(
min=NU.from_nunits(min_locked).to_tokens(), clamp=False
)
stake_duration_range = click.IntRange(
min=economics.minimum_locked_periods, clamp=False
)
password = None
if not hw_wallet and not blockchain.client.is_local:
password = get_client_password(checksum_address=client_account)
#
# Stage Stake
#
if not value:
value = click.prompt(
f"Enter stake value in NU "
f"({NU.from_nunits(STAKEHOLDER.economics.minimum_allowed_locked)} - "
f"{NU.from_nunits(STAKEHOLDER.economics.maximum_allowed_locked)})",
type=stake_value_range,
default=NU.from_nunits(min_locked).to_tokens(),
)
value = NU.from_tokens(value)
if not lock_periods:
prompt = f"Enter stake duration ({STAKEHOLDER.economics.minimum_locked_periods} periods minimum)"
lock_periods = click.prompt(prompt, type=stake_duration_range)
start_period = STAKEHOLDER.staking_agent.get_current_period() + 1
unlock_period = start_period + lock_periods
#
# Review
#
if not force:
painting.paint_staged_stake(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
stake_value=value,
lock_periods=lock_periods,
start_period=start_period,
unlock_period=unlock_period,
)
confirm_staged_stake(
staker_address=staking_address, value=value, lock_periods=lock_periods
)
# Last chance to bail
click.confirm("Publish staged stake to the blockchain?", abort=True)
# Execute
STAKEHOLDER.assimilate(checksum_address=client_account, password=password)
new_stake = STAKEHOLDER.initialize_stake(amount=value, lock_periods=lock_periods)
painting.paint_staking_confirmation(
emitter=emitter, ursula=STAKEHOLDER, transactions=new_stake.transactions
)
|
https://github.com/nucypher/nucypher/issues/1375
|
____ __ __
/\ _`\ /\ \__ /\ \
\ \,\L\_\ \ ,_\ __ \ \ \/'\ __ _ __
\/_\__ \\ \ \/ /'__`\\ \ , < /'__`\/\`'__\
/\ \L\ \ \ \_/\ \L\.\\ \ \\`\ /\ __/\ \ \/
\ `\____\ \__\ \__/.\_\ \_\ \_\ \____\\ \_\
\/_____/\/__/\/__/\/_/\/_/\/_/\/____/ \/_/
The Holder of Stakes.
======================================= Active Stakes =========================================
| ~ | Staker | Worker | # | Value | Duration | Enactment
| | ------ | ------ | - | -------- | ------------ | -----------------------------------------
| 0 | 0xFD3D | 0x1368 | 0 | 15000 NU | 45 periods . | Aug 06 21:00:30 PDT - Sep 20 21:00:30 PDT
| 1 | 0xFD3D | 0x1368 | 1 | 100000 NU | 300 periods | Oct 01 21:00:30 PDT - Jul 27 21:00:30 PDT
| 2 | 0x9CA6 | 0x3d51 | 0 | 175864.607570726413344034 NU | 33 periods . | Aug 25 21:00:30 PDT - Sep 27 21:00:30 PDT
| 3 | 0x9CA6 | 0x3d51 | 1 | 200000 NU | 365 periods | Oct 01 21:00:30 PDT - Sep 30 21:00:30 PDT
| 4 | 0x26FD | 0x4801 | 0 | 50000 NU | 60 periods . | Sep 01 21:00:30 PDT - Oct 31 21:00:30 PDT
Select Stake: 4
Enter target value (must be less than or equal to 50000 NU): 40000
Enter number of periods to extend: 250
Traceback (most recent call last):
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/config.py", line 217, in wrapper
return func(config, *args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/characters/stake.py", line 365, in stake
extension=extension)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/painting.py", line 406, in paint_staged_stake_division
new_end_period = original_stake.end_period + extension
AttributeError: 'Stake' object has no attribute 'end_period'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
AttributeError
|
def divide(
click_config,
# Stake Options
poa,
light,
registry_filepath,
config_file,
provider_uri,
staking_address,
hw_wallet,
beneficiary_address,
allocation_filepath,
# Other
force,
value,
lock_periods,
index,
):
"""
Create a new stake from part of an existing one.
"""
### Setup ###
emitter = _setup_emitter(click_config)
STAKEHOLDER, blockchain = _create_stakeholder(
config_file,
provider_uri,
poa,
light,
registry_filepath,
staking_address,
beneficiary_address=beneficiary_address,
allocation_filepath=allocation_filepath,
)
#############
client_account, staking_address = handle_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
economics = STAKEHOLDER.economics
# Dynamic click types (Economics)
min_locked = economics.minimum_allowed_locked
stake_value_range = click.FloatRange(
min=NU.from_nunits(min_locked).to_tokens(), clamp=False
)
stake_extension_range = click.IntRange(
min=1, max=economics.maximum_allowed_locked, clamp=False
)
if staking_address and index is not None: # 0 is valid.
STAKEHOLDER.stakes = StakeList(
registry=STAKEHOLDER.registry, checksum_address=staking_address
)
STAKEHOLDER.stakes.refresh()
current_stake = STAKEHOLDER.stakes[index]
else:
current_stake = select_stake(stakeholder=STAKEHOLDER, emitter=emitter)
#
# Stage Stake
#
# Value
if not value:
value = click.prompt(
f"Enter target value"
f"{NU.from_nunits(STAKEHOLDER.economics.minimum_allowed_locked)})"
f"- ({str(current_stake.value)}",
type=stake_value_range,
)
value = NU(value, "NU")
# Duration
if not lock_periods:
extension = click.prompt(
"Enter number of periods to extend", type=stake_extension_range
)
else:
extension = lock_periods
if not force:
painting.paint_staged_stake_division(
emitter=emitter,
stakeholder=STAKEHOLDER,
original_stake=current_stake,
target_value=value,
extension=extension,
)
click.confirm("Is this correct?", abort=True)
# Execute
password = None
if not hw_wallet and not blockchain.client.is_local:
password = get_client_password(checksum_address=current_stake.staker_address)
STAKEHOLDER.assimilate(
checksum_address=current_stake.staker_address, password=password
)
modified_stake, new_stake = STAKEHOLDER.divide_stake(
stake_index=current_stake.index,
target_value=value,
additional_periods=extension,
)
emitter.echo("Successfully divided stake", color="green", verbosity=1)
paint_receipt_summary(
emitter=emitter,
receipt=new_stake.receipt,
chain_name=blockchain.client.chain_name,
)
# Show the resulting stake list
painting.paint_stakes(emitter=emitter, stakes=STAKEHOLDER.stakes)
|
def divide(
click_config,
# Stake Options
poa,
light,
registry_filepath,
config_file,
provider_uri,
staking_address,
hw_wallet,
beneficiary_address,
allocation_filepath,
# Other
force,
value,
lock_periods,
index,
):
"""
Create a new stake from part of an existing one.
"""
### Setup ###
emitter = _setup_emitter(click_config)
STAKEHOLDER, blockchain = _create_stakeholder(
config_file,
provider_uri,
poa,
light,
registry_filepath,
staking_address,
beneficiary_address=beneficiary_address,
allocation_filepath=allocation_filepath,
)
#############
client_account, staking_address = handle_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
economics = STAKEHOLDER.economics
# Dynamic click types (Economics)
min_locked = economics.minimum_allowed_locked
stake_value_range = click.FloatRange(
min=NU.from_nunits(min_locked).to_tokens(), clamp=False
)
stake_extension_range = click.IntRange(
min=1, max=economics.maximum_allowed_locked, clamp=False
)
if staking_address and index is not None: # 0 is valid.
STAKEHOLDER.stakes = StakeList(
registry=STAKEHOLDER.registry, checksum_address=staking_address
)
STAKEHOLDER.stakes.refresh()
current_stake = STAKEHOLDER.stakes[index]
else:
current_stake = select_stake(stakeholder=STAKEHOLDER, emitter=emitter)
#
# Stage Stake
#
# Value
if not value:
value = click.prompt(
f"Enter target value ({str(current_stake.value)} - "
f"{NU.from_nunits(STAKEHOLDER.economics.maximum_allowed_locked)})",
type=stake_value_range,
)
value = NU(value, "NU")
# Duration
if not lock_periods:
extension = click.prompt(
"Enter number of periods to extend", type=stake_extension_range
)
else:
extension = lock_periods
if not force:
painting.paint_staged_stake_division(
emitter=emitter,
stakeholder=STAKEHOLDER,
original_stake=current_stake,
target_value=value,
extension=extension,
)
click.confirm("Is this correct?", abort=True)
# Execute
password = None
if not hw_wallet and not blockchain.client.is_local:
password = get_client_password(checksum_address=current_stake.staker_address)
STAKEHOLDER.assimilate(
checksum_address=current_stake.staker_address, password=password
)
modified_stake, new_stake = STAKEHOLDER.divide_stake(
stake_index=current_stake.index,
target_value=value,
additional_periods=extension,
)
emitter.echo("Successfully divided stake", color="green", verbosity=1)
paint_receipt_summary(
emitter=emitter,
receipt=new_stake.receipt,
chain_name=blockchain.client.chain_name,
)
# Show the resulting stake list
painting.paint_stakes(emitter=emitter, stakes=STAKEHOLDER.stakes)
|
https://github.com/nucypher/nucypher/issues/1375
|
____ __ __
/\ _`\ /\ \__ /\ \
\ \,\L\_\ \ ,_\ __ \ \ \/'\ __ _ __
\/_\__ \\ \ \/ /'__`\\ \ , < /'__`\/\`'__\
/\ \L\ \ \ \_/\ \L\.\\ \ \\`\ /\ __/\ \ \/
\ `\____\ \__\ \__/.\_\ \_\ \_\ \____\\ \_\
\/_____/\/__/\/__/\/_/\/_/\/_/\/____/ \/_/
The Holder of Stakes.
======================================= Active Stakes =========================================
| ~ | Staker | Worker | # | Value | Duration | Enactment
| | ------ | ------ | - | -------- | ------------ | -----------------------------------------
| 0 | 0xFD3D | 0x1368 | 0 | 15000 NU | 45 periods . | Aug 06 21:00:30 PDT - Sep 20 21:00:30 PDT
| 1 | 0xFD3D | 0x1368 | 1 | 100000 NU | 300 periods | Oct 01 21:00:30 PDT - Jul 27 21:00:30 PDT
| 2 | 0x9CA6 | 0x3d51 | 0 | 175864.607570726413344034 NU | 33 periods . | Aug 25 21:00:30 PDT - Sep 27 21:00:30 PDT
| 3 | 0x9CA6 | 0x3d51 | 1 | 200000 NU | 365 periods | Oct 01 21:00:30 PDT - Sep 30 21:00:30 PDT
| 4 | 0x26FD | 0x4801 | 0 | 50000 NU | 60 periods . | Sep 01 21:00:30 PDT - Oct 31 21:00:30 PDT
Select Stake: 4
Enter target value (must be less than or equal to 50000 NU): 40000
Enter number of periods to extend: 250
Traceback (most recent call last):
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/config.py", line 217, in wrapper
return func(config, *args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/characters/stake.py", line 365, in stake
extension=extension)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/painting.py", line 406, in paint_staged_stake_division
new_end_period = original_stake.end_period + extension
AttributeError: 'Stake' object has no attribute 'end_period'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
AttributeError
|
def paint_stakes(emitter, stakes, paint_inactive: bool = False):
header = (
f"| ~ | Staker | Worker | # | Value | Duration | Enactment "
)
breaky = f"| | ------ | ------ | - | -------- | ------------ | ----------------------------------------- "
active_stakes = sorted(
(stake for stake in stakes if stake.is_active),
key=lambda some_stake: some_stake.address_index_ordering_key,
)
if active_stakes:
title = "======================================= Active Stakes =========================================\n"
emitter.echo(title)
emitter.echo(header, bold=True)
emitter.echo(breaky, bold=True)
for index, stake in enumerate(active_stakes):
row = prettify_stake(stake=stake, index=index)
row_color = (
"yellow"
if stake.worker_address == BlockchainInterface.NULL_ADDRESS
else "white"
)
emitter.echo(row, color=row_color)
emitter.echo("") # newline
if paint_inactive:
title = "\n====================================== Inactive Stakes ========================================\n"
emitter.echo(title)
emitter.echo(header, bold=True)
emitter.echo(breaky, bold=True)
for stake in sorted(
[s for s in stakes if s not in active_stakes], # TODO
key=lambda some_stake: some_stake.address_index_ordering_key,
):
row = prettify_stake(stake=stake, index=None)
emitter.echo(row, color="red")
emitter.echo("") # newline
elif not active_stakes:
emitter.echo(f"There are no active stakes\n")
|
def paint_stakes(emitter, stakes, paint_inactive: bool = False):
header = (
f"| ~ | Staker | Worker | # | Value | Duration | Enactment "
)
breaky = f"| | ------ | ------ | - | -------- | ------------ | ----------------------------------------- "
active_stakes = sorted(
(stake for stake in stakes if stake.is_active),
key=lambda some_stake: some_stake.address_index_ordering_key,
)
if active_stakes:
title = "======================================= Active Stakes =========================================\n"
emitter.echo(title)
emitter.echo(header, bold=True)
emitter.echo(breaky, bold=True)
for index, stake in enumerate(active_stakes):
row = prettify_stake(stake=stake, index=index)
row_color = (
"yellow"
if stake.worker_address == BlockchainInterface.NULL_ADDRESS
else "white"
)
emitter.echo(row, color=row_color)
emitter.echo("") # newline
if paint_inactive:
title = "\n====================================== Inactive Stakes ========================================\n"
emitter.echo(title)
emitter.echo(header, bold=True)
emitter.echo(breaky, bold=True)
for stake in sorted(
[s for s in stakes if s not in active_stakes],
key=lambda some_stake: some_stake.address_index_ordering_key,
):
row = prettify_stake(stake=stake, index=None)
emitter.echo(row, color="red")
emitter.echo("") # newline
elif not active_stakes:
emitter.echo(f"There are no active stakes\n")
|
https://github.com/nucypher/nucypher/issues/1375
|
____ __ __
/\ _`\ /\ \__ /\ \
\ \,\L\_\ \ ,_\ __ \ \ \/'\ __ _ __
\/_\__ \\ \ \/ /'__`\\ \ , < /'__`\/\`'__\
/\ \L\ \ \ \_/\ \L\.\\ \ \\`\ /\ __/\ \ \/
\ `\____\ \__\ \__/.\_\ \_\ \_\ \____\\ \_\
\/_____/\/__/\/__/\/_/\/_/\/_/\/____/ \/_/
The Holder of Stakes.
======================================= Active Stakes =========================================
| ~ | Staker | Worker | # | Value | Duration | Enactment
| | ------ | ------ | - | -------- | ------------ | -----------------------------------------
| 0 | 0xFD3D | 0x1368 | 0 | 15000 NU | 45 periods . | Aug 06 21:00:30 PDT - Sep 20 21:00:30 PDT
| 1 | 0xFD3D | 0x1368 | 1 | 100000 NU | 300 periods | Oct 01 21:00:30 PDT - Jul 27 21:00:30 PDT
| 2 | 0x9CA6 | 0x3d51 | 0 | 175864.607570726413344034 NU | 33 periods . | Aug 25 21:00:30 PDT - Sep 27 21:00:30 PDT
| 3 | 0x9CA6 | 0x3d51 | 1 | 200000 NU | 365 periods | Oct 01 21:00:30 PDT - Sep 30 21:00:30 PDT
| 4 | 0x26FD | 0x4801 | 0 | 50000 NU | 60 periods . | Sep 01 21:00:30 PDT - Oct 31 21:00:30 PDT
Select Stake: 4
Enter target value (must be less than or equal to 50000 NU): 40000
Enter number of periods to extend: 250
Traceback (most recent call last):
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/config.py", line 217, in wrapper
return func(config, *args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/characters/stake.py", line 365, in stake
extension=extension)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/painting.py", line 406, in paint_staged_stake_division
new_end_period = original_stake.end_period + extension
AttributeError: 'Stake' object has no attribute 'end_period'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
AttributeError
|
def paint_staged_stake_division(
emitter, stakeholder, original_stake, target_value, extension
):
new_end_period = original_stake.final_locked_period + extension
new_duration_periods = new_end_period - original_stake.first_locked_period
staking_address = original_stake.staker_address
division_message = f"""
Staking address: {staking_address}
~ Original Stake: {prettify_stake(stake=original_stake, index=None)}
"""
paint_staged_stake(
emitter=emitter,
stakeholder=stakeholder,
staking_address=staking_address,
stake_value=target_value,
lock_periods=new_duration_periods,
start_period=original_stake.first_locked_period,
unlock_period=new_end_period,
division_message=division_message,
)
|
def paint_staged_stake_division(
emitter, stakeholder, original_stake, target_value, extension
):
new_end_period = original_stake.end_period + extension
new_duration_periods = new_end_period - original_stake.start_period
staking_address = original_stake.owner_address
division_message = f"""
Staking address: {staking_address}
~ Original Stake: {prettify_stake(stake=original_stake, index=None)}
"""
paint_staged_stake(
emitter=emitter,
stakeholder=stakeholder,
staking_address=staking_address,
stake_value=target_value,
lock_periods=new_duration_periods,
start_period=original_stake.start_period,
unlock_period=new_end_period,
division_message=division_message,
)
|
https://github.com/nucypher/nucypher/issues/1375
|
____ __ __
/\ _`\ /\ \__ /\ \
\ \,\L\_\ \ ,_\ __ \ \ \/'\ __ _ __
\/_\__ \\ \ \/ /'__`\\ \ , < /'__`\/\`'__\
/\ \L\ \ \ \_/\ \L\.\\ \ \\`\ /\ __/\ \ \/
\ `\____\ \__\ \__/.\_\ \_\ \_\ \____\\ \_\
\/_____/\/__/\/__/\/_/\/_/\/_/\/____/ \/_/
The Holder of Stakes.
======================================= Active Stakes =========================================
| ~ | Staker | Worker | # | Value | Duration | Enactment
| | ------ | ------ | - | -------- | ------------ | -----------------------------------------
| 0 | 0xFD3D | 0x1368 | 0 | 15000 NU | 45 periods . | Aug 06 21:00:30 PDT - Sep 20 21:00:30 PDT
| 1 | 0xFD3D | 0x1368 | 1 | 100000 NU | 300 periods | Oct 01 21:00:30 PDT - Jul 27 21:00:30 PDT
| 2 | 0x9CA6 | 0x3d51 | 0 | 175864.607570726413344034 NU | 33 periods . | Aug 25 21:00:30 PDT - Sep 27 21:00:30 PDT
| 3 | 0x9CA6 | 0x3d51 | 1 | 200000 NU | 365 periods | Oct 01 21:00:30 PDT - Sep 30 21:00:30 PDT
| 4 | 0x26FD | 0x4801 | 0 | 50000 NU | 60 periods . | Sep 01 21:00:30 PDT - Oct 31 21:00:30 PDT
Select Stake: 4
Enter target value (must be less than or equal to 50000 NU): 40000
Enter number of periods to extend: 250
Traceback (most recent call last):
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/michwill/.local/share/virtualenvs/nucypher-kms-5SSQ9ZHE/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/config.py", line 217, in wrapper
return func(config, *args, **kwargs)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/characters/stake.py", line 365, in stake
extension=extension)
File "/home/michwill/Projects/nucypher-kms/nucypher/cli/painting.py", line 406, in paint_staged_stake_division
new_end_period = original_stake.end_period + extension
AttributeError: 'Stake' object has no attribute 'end_period'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
AttributeError
|
def from_seed_and_stake_info(
cls,
seed_uri: str,
federated_only: bool,
minimum_stake: int = 0,
registry: BaseContractRegistry = None,
network_middleware: RestMiddleware = None,
*args,
**kwargs,
) -> "Ursula":
if network_middleware is None:
network_middleware = RestMiddleware()
#
# WARNING: xxx Poison xxx
# Let's learn what we can about the ... "seednode".
#
# Parse node URI
host, port, checksum_address = parse_node_uri(seed_uri)
# Fetch the hosts TLS certificate and read the common name
certificate = network_middleware.get_certificate(host=host, port=port)
real_host = certificate.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
# Create a temporary certificate storage area
temp_node_storage = ForgetfulNodeStorage(federated_only=federated_only)
temp_certificate_filepath = temp_node_storage.store_node_certificate(
certificate=certificate
)
# Load the host as a potential seed node
potential_seed_node = cls.from_rest_url(
registry=registry,
host=real_host,
port=port,
network_middleware=network_middleware,
certificate_filepath=temp_certificate_filepath,
federated_only=federated_only,
*args,
**kwargs,
)
# Check the node's stake (optional)
if minimum_stake > 0 and not federated_only:
staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=registry)
seednode_stake = staking_agent.get_locked_tokens(
staker_address=checksum_address
)
if seednode_stake < minimum_stake:
raise Learner.NotATeacher(
f"{checksum_address} is staking less then the specified minimum stake value ({minimum_stake})."
)
# Verify the node's TLS certificate
try:
potential_seed_node.verify_node(
network_middleware=network_middleware,
registry=registry,
certificate_filepath=temp_certificate_filepath,
)
except potential_seed_node.InvalidNode:
# TODO: What if our seed node fails verification?
raise
# OK - everyone get out
temp_node_storage.forget()
return potential_seed_node
|
def from_seed_and_stake_info(
cls,
seed_uri: str,
federated_only: bool,
minimum_stake: int = 0,
registry: BaseContractRegistry = None,
network_middleware: RestMiddleware = None,
*args,
**kwargs,
) -> "Ursula":
if network_middleware is None:
network_middleware = RestMiddleware()
#
# WARNING: xxx Poison xxx
# Let's learn what we can about the ... "seednode".
#
# Parse node URI
host, port, checksum_address = parse_node_uri(seed_uri)
# Fetch the hosts TLS certificate and read the common name
certificate = network_middleware.get_certificate(host=host, port=port)
real_host = certificate.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
# Create a temporary certificate storage area
temp_node_storage = ForgetfulNodeStorage(federated_only=federated_only)
temp_certificate_filepath = temp_node_storage.store_node_certificate(
certificate=certificate
)
# Load the host as a potential seed node
potential_seed_node = cls.from_rest_url(
registry=registry,
host=real_host,
port=port,
network_middleware=network_middleware,
certificate_filepath=temp_certificate_filepath,
federated_only=federated_only,
*args,
**kwargs,
)
# Check the node's stake (optional)
if minimum_stake > 0 and not federated_only:
staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=registry)
seednode_stake = staking_agent.get_locked_tokens(
staker_address=checksum_address
)
if seednode_stake < minimum_stake:
raise Learner.NotATeacher(
f"{checksum_address} is staking less then the specified minimum stake value ({minimum_stake})."
)
# Verify the node's TLS certificate
try:
potential_seed_node.verify_node(
network_middleware=network_middleware,
accept_federated_only=federated_only,
certificate_filepath=temp_certificate_filepath,
)
except potential_seed_node.InvalidNode:
# TODO: What if our seed node fails verification?
raise
# OK - everyone get out
temp_node_storage.forget()
return potential_seed_node
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def from_bytes(
cls,
ursula_as_bytes: bytes,
version: int = INCLUDED_IN_BYTESTRING,
federated_only: bool = False,
registry: BaseContractRegistry = None,
) -> "Ursula":
if version is INCLUDED_IN_BYTESTRING:
version, payload = cls.version_splitter(ursula_as_bytes, return_remainder=True)
else:
payload = ursula_as_bytes
# Check version and raise IsFromTheFuture if this node is... you guessed it...
if version > cls.LEARNER_VERSION:
# Try to handle failure, even during failure, graceful degradation
# TODO: #154 - Some auto-updater logic?
try:
canonical_address, _ = BytestringSplitter(PUBLIC_ADDRESS_LENGTH)(
payload, return_remainder=True
)
checksum_address = to_checksum_address(canonical_address)
nickname, _ = nickname_from_seed(checksum_address)
display_name = cls._display_name_template.format(
cls.__name__, nickname, checksum_address
)
message = cls.unknown_version_message.format(
display_name, version, cls.LEARNER_VERSION
)
except BytestringSplittingError:
message = cls.really_unknown_version_message.format(
version, cls.LEARNER_VERSION
)
raise cls.IsFromTheFuture(message)
# Version stuff checked out. Moving on.
node_info = cls.internal_splitter(payload)
interface_info = node_info.pop("rest_interface")
node_info["rest_host"] = interface_info.host
node_info["rest_port"] = interface_info.port
node_info["timestamp"] = maya.MayaDT(node_info.pop("timestamp"))
node_info["checksum_address"] = to_checksum_address(node_info.pop("public_address"))
domains_vbytes = VariableLengthBytestring.dispense(node_info["domains"])
node_info["domains"] = set(d.decode("utf-8") for d in domains_vbytes)
ursula = cls.from_public_keys(federated_only=federated_only, **node_info)
return ursula
|
def from_bytes(
cls,
ursula_as_bytes: bytes,
version: int = INCLUDED_IN_BYTESTRING,
federated_only: bool = False,
registry: BaseContractRegistry = None,
) -> "Ursula":
if version is INCLUDED_IN_BYTESTRING:
version, payload = cls.version_splitter(ursula_as_bytes, return_remainder=True)
else:
payload = ursula_as_bytes
# Check version and raise IsFromTheFuture if this node is... you guessed it...
if version > cls.LEARNER_VERSION:
# Try to handle failure, even during failure, graceful degradation
# TODO: #154 - Some auto-updater logic?
try:
canonical_address, _ = BytestringSplitter(PUBLIC_ADDRESS_LENGTH)(
payload, return_remainder=True
)
checksum_address = to_checksum_address(canonical_address)
nickname, _ = nickname_from_seed(checksum_address)
display_name = cls._display_name_template.format(
cls.__name__, nickname, checksum_address
)
message = cls.unknown_version_message.format(
display_name, version, cls.LEARNER_VERSION
)
except BytestringSplittingError:
message = cls.really_unknown_version_message.format(
version, cls.LEARNER_VERSION
)
raise cls.IsFromTheFuture(message)
# Version stuff checked out. Moving on.
node_info = cls.internal_splitter(payload)
interface_info = node_info.pop("rest_interface")
node_info["rest_host"] = interface_info.host
node_info["rest_port"] = interface_info.port
node_info["timestamp"] = maya.MayaDT(node_info.pop("timestamp"))
node_info["checksum_address"] = to_checksum_address(node_info.pop("public_address"))
domains_vbytes = VariableLengthBytestring.dispense(node_info["domains"])
node_info["domains"] = set(d.decode("utf-8") for d in domains_vbytes)
ursula = cls.from_public_keys(
registry=registry, federated_only=federated_only, **node_info
)
return ursula
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def __setup_node_storage(self, node_storage=None) -> None:
if self.dev_mode:
node_storage = ForgetfulNodeStorage(
registry=self.registry, federated_only=self.federated_only
)
elif not node_storage:
node_storage = LocalFileBasedNodeStorage(
registry=self.registry,
config_root=self.config_root,
federated_only=self.federated_only,
)
self.node_storage = node_storage
|
def __setup_node_storage(self, node_storage=None) -> None:
if self.dev_mode:
node_storage = ForgetfulNodeStorage(
registry=self.registry, federated_only=self.federated_only
)
else:
node_storage = LocalFileBasedNodeStorage(
registry=self.registry,
config_root=self.config_root,
federated_only=self.federated_only,
)
self.node_storage = node_storage
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def __read_metadata(self, filepath: str, federated_only: bool):
from nucypher.characters.lawful import Ursula
try:
with open(filepath, "rb") as seed_file:
seed_file.seek(0)
node_bytes = self.deserializer(seed_file.read())
node = Ursula.from_bytes(
node_bytes, federated_only=federated_only
) # TODO: #466
except FileNotFoundError:
raise self.UnknownNode
return node
|
def __read_metadata(
self, filepath: str, federated_only: bool, registry: BaseContractRegistry = None
):
# TODO: Use registry None to indicate federated only
from nucypher.characters.lawful import Ursula
try:
with open(filepath, "rb") as seed_file:
seed_file.seek(0)
node_bytes = self.deserializer(seed_file.read())
node = Ursula.from_bytes(
node_bytes, registry=registry, federated_only=federated_only
)
except FileNotFoundError:
raise self.UnknownNode
return node
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def all(
self, federated_only: bool, certificates_only: bool = False
) -> Set[Union[Any, Certificate]]:
filenames = os.listdir(
self.certificates_dir if certificates_only else self.metadata_dir
)
self.log.info(
"Found {} known node metadata files at {}".format(
len(filenames), self.metadata_dir
)
)
known_certificates = set()
if certificates_only:
for filename in filenames:
certificate = self.__read_tls_public_certificate(
os.path.join(self.certificates_dir, filename)
)
known_certificates.add(certificate)
return known_certificates
else:
known_nodes = set()
for filename in filenames:
metadata_path = os.path.join(self.metadata_dir, filename)
node = self.__read_metadata(
filepath=metadata_path, federated_only=federated_only
) # TODO: 466
known_nodes.add(node)
return known_nodes
|
def all(
self, federated_only: bool, certificates_only: bool = False
) -> Set[Union[Any, Certificate]]:
filenames = os.listdir(
self.certificates_dir if certificates_only else self.metadata_dir
)
self.log.info(
"Found {} known node metadata files at {}".format(
len(filenames), self.metadata_dir
)
)
known_certificates = set()
if certificates_only:
for filename in filenames:
certificate = self.__read_tls_public_certificate(
os.path.join(self.certificates_dir, filename)
)
known_certificates.add(certificate)
return known_certificates
else:
known_nodes = set()
for filename in filenames:
metadata_path = os.path.join(self.metadata_dir, filename)
node = self.__read_metadata(
filepath=metadata_path,
registry=self.registry,
federated_only=federated_only,
) # TODO: 466
known_nodes.add(node)
return known_nodes
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def get(
self, checksum_address: str, federated_only: bool, certificate_only: bool = False
):
if certificate_only is True:
certificate = self.__read_tls_public_certificate(
checksum_address=checksum_address
)
return certificate
metadata_path = self.__generate_metadata_filepath(checksum_address=checksum_address)
node = self.__read_metadata(
filepath=metadata_path, federated_only=federated_only
) # TODO: 466
return node
|
def get(
self, checksum_address: str, federated_only: bool, certificate_only: bool = False
):
if certificate_only is True:
certificate = self.__read_tls_public_certificate(
checksum_address=checksum_address
)
return certificate
metadata_path = self.__generate_metadata_filepath(checksum_address=checksum_address)
node = self.__read_metadata(
filepath=metadata_path, registry=self.registry, federated_only=federated_only
) # TODO: 466
return node
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def __init__(
self,
domains: set,
network_middleware: RestMiddleware = __DEFAULT_MIDDLEWARE_CLASS(),
start_learning_now: bool = False,
learn_on_same_thread: bool = False,
known_nodes: tuple = None,
seed_nodes: Tuple[tuple] = None,
node_storage=None,
save_metadata: bool = False,
abort_on_learning_error: bool = False,
lonely: bool = False,
) -> None:
self.log = Logger("learning-loop") # type: Logger
self.learning_domains = domains
self.network_middleware = network_middleware
self.save_metadata = save_metadata
self.start_learning_now = start_learning_now
self.learn_on_same_thread = learn_on_same_thread
self._abort_on_learning_error = abort_on_learning_error
self._learning_listeners = defaultdict(list)
self._node_ids_to_learn_about_immediately = set()
self.__known_nodes = self.tracker_class()
self.lonely = lonely
self.done_seeding = False
if not node_storage:
# Fallback storage backend
node_storage = self.__DEFAULT_NODE_STORAGE(federated_only=self.federated_only)
self.node_storage = node_storage
if save_metadata and node_storage is NO_STORAGE_AVAILIBLE:
raise ValueError("Cannot save nodes without a configured node storage")
known_nodes = known_nodes or tuple()
self.unresponsive_startup_nodes = (
list()
) # TODO: Buckets - Attempt to use these again later
for node in known_nodes:
try:
self.remember_node(node)
except self.UnresponsiveTeacher:
self.unresponsive_startup_nodes.append(node)
self.teacher_nodes = deque()
self._current_teacher_node = None # type: Teacher
self._learning_task = task.LoopingCall(self.keep_learning_about_nodes)
self._learning_round = 0 # type: int
self._rounds_without_new_nodes = 0 # type: int
self._seed_nodes = seed_nodes or []
self.unresponsive_seed_nodes = set()
if self.start_learning_now:
self.start_learning_loop(now=self.learn_on_same_thread)
|
def __init__(
self,
domains: set,
network_middleware: RestMiddleware = __DEFAULT_MIDDLEWARE_CLASS(),
start_learning_now: bool = False,
learn_on_same_thread: bool = False,
known_nodes: tuple = None,
seed_nodes: Tuple[tuple] = None,
node_storage=None,
save_metadata: bool = False,
abort_on_learning_error: bool = False,
lonely: bool = False,
) -> None:
self.log = Logger("learning-loop") # type: Logger
self.learning_domains = domains
self.network_middleware = network_middleware
self.save_metadata = save_metadata
self.start_learning_now = start_learning_now
self.learn_on_same_thread = learn_on_same_thread
self._abort_on_learning_error = abort_on_learning_error
self._learning_listeners = defaultdict(list)
self._node_ids_to_learn_about_immediately = set()
self.__known_nodes = self.tracker_class()
self.lonely = lonely
self.done_seeding = False
# Read
if node_storage is None:
from nucypher.characters.lawful import Ursula
node_storage = self.__DEFAULT_NODE_STORAGE(
federated_only=self.federated_only, # TODO: #466
character_class=Ursula,
)
self.node_storage = node_storage
if save_metadata and node_storage is NO_STORAGE_AVAILIBLE:
raise ValueError("Cannot save nodes without a configured node storage")
known_nodes = known_nodes or tuple()
self.unresponsive_startup_nodes = (
list()
) # TODO: Buckets - Attempt to use these again later
for node in known_nodes:
try:
self.remember_node(node)
except self.UnresponsiveTeacher:
self.unresponsive_startup_nodes.append(node)
self.teacher_nodes = deque()
self._current_teacher_node = None # type: Teacher
self._learning_task = task.LoopingCall(self.keep_learning_about_nodes)
self._learning_round = 0 # type: int
self._rounds_without_new_nodes = 0 # type: int
self._seed_nodes = seed_nodes or []
self.unresponsive_seed_nodes = set()
if self.start_learning_now:
self.start_learning_loop(now=self.learn_on_same_thread)
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def remember_node(self, node, force_verification_check=False, record_fleet_state=True):
if node == self: # No need to remember self.
return False
# First, determine if this is an outdated representation of an already known node.
# TODO: #1032
with suppress(KeyError):
already_known_node = self.known_nodes[node.checksum_address]
if not node.timestamp > already_known_node.timestamp:
self.log.debug("Skipping already known node {}".format(already_known_node))
# This node is already known. We can safely return.
return False
try:
stranger_certificate = node.certificate
except AttributeError:
# Whoops, we got an Alice, Bob, or someone...
raise self.NotATeacher(
f"{node.__class__.__name__} does not have a certificate and cannot be remembered."
)
# Store node's certificate - It has been seen.
certificate_filepath = self.node_storage.store_node_certificate(
certificate=stranger_certificate
)
# In some cases (seed nodes or other temp stored certs),
# this will update the filepath from the temp location to this one.
node.certificate_filepath = certificate_filepath
self.log.info(f"Saved TLS certificate for {node.nickname}: {certificate_filepath}")
try:
node.verify_node(
force=force_verification_check,
network_middleware=self.network_middleware,
registry=self.registry,
) # composed on character subclass
except SSLError:
return False # TODO: Bucket this node as having bad TLS info - maybe it's an update that hasn't fully propagated?
except NodeSeemsToBeDown:
self.log.info(
"No Response while trying to verify node {}|{}".format(
node.rest_interface, node
)
)
return False # TODO: Bucket this node as "ghost" or something: somebody else knows about it, but we can't get to it.
listeners = self._learning_listeners.pop(node.checksum_address, tuple())
address = node.checksum_address
self.known_nodes[address] = node
if self.save_metadata:
self.node_storage.store_node_metadata(node=node)
self.log.info(
"Remembering {} ({}), popping {} listeners.".format(
node.nickname, node.checksum_address, len(listeners)
)
)
for listener in listeners:
listener.add(address)
self._node_ids_to_learn_about_immediately.discard(address)
if record_fleet_state:
self.known_nodes.record_fleet_state()
return node
|
def remember_node(self, node, force_verification_check=False, record_fleet_state=True):
if node == self: # No need to remember self.
return False
# First, determine if this is an outdated representation of an already known node.
# TODO: #1032
with suppress(KeyError):
already_known_node = self.known_nodes[node.checksum_address]
if not node.timestamp > already_known_node.timestamp:
self.log.debug("Skipping already known node {}".format(already_known_node))
# This node is already known. We can safely return.
return False
try:
stranger_certificate = node.certificate
except AttributeError:
# Whoops, we got an Alice, Bob, or someone...
raise self.NotATeacher(
f"{node.__class__.__name__} does not have a certificate and cannot be remembered."
)
# Store node's certificate - It has been seen.
certificate_filepath = self.node_storage.store_node_certificate(
certificate=stranger_certificate
)
# In some cases (seed nodes or other temp stored certs),
# this will update the filepath from the temp location to this one.
node.certificate_filepath = certificate_filepath
self.log.info(f"Saved TLS certificate for {node.nickname}: {certificate_filepath}")
try:
node.verify_node(
force=force_verification_check,
network_middleware=self.network_middleware,
accept_federated_only=self.federated_only, # TODO: 466 - move federated-only up to Learner?
)
except SSLError:
return False # TODO: Bucket this node as having bad TLS info - maybe it's an update that hasn't fully propagated?
except NodeSeemsToBeDown:
self.log.info(
"No Response while trying to verify node {}|{}".format(
node.rest_interface, node
)
)
return False # TODO: Bucket this node as "ghost" or something: somebody else knows about it, but we can't get to it.
listeners = self._learning_listeners.pop(node.checksum_address, tuple())
address = node.checksum_address
self.known_nodes[address] = node
if self.save_metadata:
self.node_storage.store_node_metadata(node=node)
self.log.info(
"Remembering {} ({}), popping {} listeners.".format(
node.nickname, node.checksum_address, len(listeners)
)
)
for listener in listeners:
listener.add(address)
self._node_ids_to_learn_about_immediately.discard(address)
if record_fleet_state:
self.known_nodes.record_fleet_state()
return node
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def learn_from_teacher_node(self, eager=True):
"""
Sends a request to node_url to find out about known nodes.
"""
self._learning_round += 1
try:
current_teacher = self.current_teacher_node()
except self.NotEnoughTeachers as e:
self.log.warn("Can't learn right now: {}".format(e.args[0]))
return
if Teacher in self.__class__.__bases__:
announce_nodes = [self]
else:
announce_nodes = None
unresponsive_nodes = set()
#
# Request
#
try:
response = self.network_middleware.get_nodes_via_rest(
node=current_teacher,
nodes_i_need=self._node_ids_to_learn_about_immediately,
announce_nodes=announce_nodes,
fleet_checksum=self.known_nodes.checksum,
)
except NodeSeemsToBeDown as e:
unresponsive_nodes.add(current_teacher)
self.log.info("Bad Response from teacher: {}:{}.".format(current_teacher, e))
return
finally:
self.cycle_teacher_node()
# Before we parse the response, let's handle some edge cases.
if response.status_code == 204:
# In this case, this node knows about no other nodes. Hopefully we've taught it something.
if response.content == b"":
return NO_KNOWN_NODES
# In the other case - where the status code is 204 but the repsonse isn't blank - we'll keep parsing.
# It's possible that our fleet states match, and we'll check for that later.
elif response.status_code != 200:
self.log.info(
"Bad response from teacher {}: {} - {}".format(
current_teacher, response, response.content
)
)
return
#
# Deserialize
#
try:
signature, node_payload = signature_splitter(
response.content, return_remainder=True
)
except BytestringSplittingError as e:
self.log.warn(
"No signature prepended to Teacher {} payload: {}".format(
current_teacher, response.content
)
)
return
try:
self.verify_from(current_teacher, node_payload, signature=signature)
except current_teacher.InvalidSignature:
# TODO: What to do if the teacher improperly signed the node payload?
raise
# End edge case handling.
fleet_state_checksum_bytes, fleet_state_updated_bytes, node_payload = (
FleetStateTracker.snapshot_splitter(node_payload, return_remainder=True)
)
current_teacher.last_seen = maya.now()
# TODO: This is weird - let's get a stranger FleetState going.
checksum = fleet_state_checksum_bytes.hex()
# TODO: This doesn't make sense - a decentralized node can still learn about a federated-only node.
from nucypher.characters.lawful import Ursula
if constant_or_bytes(node_payload) is FLEET_STATES_MATCH:
current_teacher.update_snapshot(
checksum=checksum,
updated=maya.MayaDT(
int.from_bytes(fleet_state_updated_bytes, byteorder="big")
),
number_of_known_nodes=len(self.known_nodes),
)
return FLEET_STATES_MATCH
node_list = Ursula.batch_from_bytes(
node_payload, registry=self.registry, federated_only=self.federated_only
) # TODO: 466
current_teacher.update_snapshot(
checksum=checksum,
updated=maya.MayaDT(int.from_bytes(fleet_state_updated_bytes, byteorder="big")),
number_of_known_nodes=len(node_list),
)
new_nodes = []
for node in node_list:
if not set(self.learning_domains).intersection(set(node.serving_domains)):
self.log.debug(
f"Teacher {node} is serving {node.serving_domains}, but we're only learning {self.learning_domains}."
)
continue # This node is not serving any of our domains.
# First, determine if this is an outdated representation of an already known node.
# TODO: #1032
with suppress(KeyError):
already_known_node = self.known_nodes[node.checksum_address]
if not node.timestamp > already_known_node.timestamp:
self.log.debug(
"Skipping already known node {}".format(already_known_node)
)
# This node is already known. We can safely continue to the next.
continue
#
# Verify Node
#
certificate_filepath = self.node_storage.store_node_certificate(
certificate=node.certificate
)
try:
if eager:
node.verify_node(
self.network_middleware,
registry=self.registry,
certificate_filepath=certificate_filepath,
)
self.log.debug("Verified node: {}".format(node.checksum_address))
else:
node.validate_metadata(registry=self.registry)
#
# Report Failure
#
except NodeSeemsToBeDown:
self.log.info(
f"Verification Failed - Cannot establish connection to {node}."
)
except node.StampNotSigned:
self.log.warn(f"Verification Failed - {node} stamp is unsigned.")
except node.NotStaking:
self.log.warn(
f"Verification Failed - "
f"{node} has no active stakes in the current period "
f"({self.staking_agent.get_current_period()}"
)
except node.InvalidWorkerSignature:
self.log.warn(
f"Verification Failed - "
f"{node} has an invalid wallet signature for {node.decentralized_identity_evidence}"
)
except node.DetachedWorker:
self.log.warn(f"Verification Failed - {node} is not bonded to a Staker.")
except node.InvalidNode:
self.log.warn(node.invalid_metadata_message.format(node))
except node.SuspiciousActivity:
message = (
f"Suspicious Activity: Discovered node with bad signature: {node}."
f"Propagated by: {current_teacher}"
)
self.log.warn(message)
#
# Success
#
else:
new = self.remember_node(node, record_fleet_state=False)
if new:
new_nodes.append(node)
#
# Continue
#
self._adjust_learning(new_nodes)
learning_round_log_message = (
"Learning round {}. Teacher: {} knew about {} nodes, {} were new."
)
self.log.info(
learning_round_log_message.format(
self._learning_round, current_teacher, len(node_list), len(new_nodes)
)
)
if new_nodes:
self.known_nodes.record_fleet_state()
for node in new_nodes:
self.node_storage.store_node_certificate(certificate=node.certificate)
return new_nodes
|
def learn_from_teacher_node(self, eager=True):
"""
Sends a request to node_url to find out about known nodes.
"""
self._learning_round += 1
try:
current_teacher = self.current_teacher_node()
except self.NotEnoughTeachers as e:
self.log.warn("Can't learn right now: {}".format(e.args[0]))
return
if Teacher in self.__class__.__bases__:
announce_nodes = [self]
else:
announce_nodes = None
unresponsive_nodes = set()
#
# Request
#
try:
response = self.network_middleware.get_nodes_via_rest(
node=current_teacher,
nodes_i_need=self._node_ids_to_learn_about_immediately,
announce_nodes=announce_nodes,
fleet_checksum=self.known_nodes.checksum,
)
except NodeSeemsToBeDown as e:
unresponsive_nodes.add(current_teacher)
self.log.info("Bad Response from teacher: {}:{}.".format(current_teacher, e))
return
finally:
self.cycle_teacher_node()
# Before we parse the response, let's handle some edge cases.
if response.status_code == 204:
# In this case, this node knows about no other nodes. Hopefully we've taught it something.
if response.content == b"":
return NO_KNOWN_NODES
# In the other case - where the status code is 204 but the repsonse isn't blank - we'll keep parsing.
# It's possible that our fleet states match, and we'll check for that later.
elif response.status_code != 200:
self.log.info(
"Bad response from teacher {}: {} - {}".format(
current_teacher, response, response.content
)
)
return
#
# Deserialize
#
try:
signature, node_payload = signature_splitter(
response.content, return_remainder=True
)
except BytestringSplittingError as e:
self.log.warn(
"No signature prepended to Teacher {} payload: {}".format(
current_teacher, response.content
)
)
return
try:
self.verify_from(current_teacher, node_payload, signature=signature)
except current_teacher.InvalidSignature:
# TODO: What to do if the teacher improperly signed the node payload?
raise
# End edge case handling.
fleet_state_checksum_bytes, fleet_state_updated_bytes, node_payload = (
FleetStateTracker.snapshot_splitter(node_payload, return_remainder=True)
)
current_teacher.last_seen = maya.now()
# TODO: This is weird - let's get a stranger FleetState going.
checksum = fleet_state_checksum_bytes.hex()
# TODO: This doesn't make sense - a decentralized node can still learn about a federated-only node.
from nucypher.characters.lawful import Ursula
if constant_or_bytes(node_payload) is FLEET_STATES_MATCH:
current_teacher.update_snapshot(
checksum=checksum,
updated=maya.MayaDT(
int.from_bytes(fleet_state_updated_bytes, byteorder="big")
),
number_of_known_nodes=len(self.known_nodes),
)
return FLEET_STATES_MATCH
node_list = Ursula.batch_from_bytes(
node_payload, registry=self.registry, federated_only=self.federated_only
) # TODO: 466
current_teacher.update_snapshot(
checksum=checksum,
updated=maya.MayaDT(int.from_bytes(fleet_state_updated_bytes, byteorder="big")),
number_of_known_nodes=len(node_list),
)
new_nodes = []
for node in node_list:
if not set(self.learning_domains).intersection(set(node.serving_domains)):
self.log.debug(
f"Teacher {node} is serving {node.serving_domains}, but we're only learning {self.learning_domains}."
)
continue # This node is not serving any of our domains.
# First, determine if this is an outdated representation of an already known node.
# TODO: #1032
with suppress(KeyError):
already_known_node = self.known_nodes[node.checksum_address]
if not node.timestamp > already_known_node.timestamp:
self.log.debug(
"Skipping already known node {}".format(already_known_node)
)
# This node is already known. We can safely continue to the next.
continue
#
# Verify Node
#
certificate_filepath = self.node_storage.store_node_certificate(
certificate=node.certificate
)
try:
if eager:
node.verify_node(
self.network_middleware,
accept_federated_only=self.federated_only, # TODO: 466
certificate_filepath=certificate_filepath,
)
self.log.debug("Verified node: {}".format(node.checksum_address))
else:
node.validate_metadata(
accept_federated_only=self.federated_only
) # TODO: 466
#
# Report Failure
#
except NodeSeemsToBeDown as e:
self.log.info(
f"Verification Failed - Cannot establish connection to {node}."
)
except node.StampNotSigned:
self.log.warn(f"Verification Failed - {node} stamp is unsigned.")
except node.NotStaking:
self.log.warn(
f"Verification Failed - "
f"{node} has no active stakes in the current period "
f"({self.staking_agent.get_current_period()}"
)
except node.InvalidWorkerSignature:
self.log.warn(
f"Verification Failed - "
f"{node} has an invalid wallet signature for {node.decentralized_identity_evidence}"
)
except node.DetachedWorker:
self.log.warn(f"Verification Failed - {node} is not bonded to a Staker.")
except node.InvalidNode:
self.log.warn(node.invalid_metadata_message.format(node))
except node.SuspiciousActivity:
message = (
f"Suspicious Activity: Discovered node with bad signature: {node}."
f"Propagated by: {current_teacher}"
)
self.log.warn(message)
#
# Success
#
else:
new = self.remember_node(node, record_fleet_state=False)
if new:
new_nodes.append(node)
#
# Continue
#
self._adjust_learning(new_nodes)
learning_round_log_message = (
"Learning round {}. Teacher: {} knew about {} nodes, {} were new."
)
self.log.info(
learning_round_log_message.format(
self._learning_round, current_teacher, len(node_list), len(new_nodes)
)
)
if new_nodes:
self.known_nodes.record_fleet_state()
for node in new_nodes:
self.node_storage.store_node_certificate(certificate=node.certificate)
return new_nodes
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def _worker_is_bonded_to_staker(self, registry: BaseContractRegistry) -> bool:
"""
This method assumes the stamp's signature is valid and accurate.
As a follow-up, this checks that the worker is linked to a staker, but it may be
the case that the "staker" isn't "staking" (e.g., all her tokens have been slashed).
"""
# Lazy agent get or create
staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=registry)
staker_address = staking_agent.get_staker_from_worker(
worker_address=self.worker_address
)
if staker_address == BlockchainInterface.NULL_ADDRESS:
raise self.DetachedWorker(f"Worker {self.worker_address} is detached")
return staker_address == self.checksum_address
|
def _worker_is_bonded_to_staker(self) -> bool:
"""
This method assumes the stamp's signature is valid and accurate.
As a follow-up, this checks that the worker is linked to a staker, but it may be
the case that the "staker" isn't "staking" (e.g., all her tokens have been slashed).
"""
staker_address = self.staking_agent.get_staker_from_worker(
worker_address=self.worker_address
)
if staker_address == BlockchainInterface.NULL_ADDRESS:
raise self.DetachedWorker(f"Worker {self.worker_address} is detached")
return staker_address == self.checksum_address
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def _staker_is_really_staking(self, registry: BaseContractRegistry) -> bool:
"""
This method assumes the stamp's signature is valid and accurate.
As a follow-up, this checks that the staker is, indeed, staking.
"""
# Lazy agent get or create
staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=registry)
locked_tokens = staking_agent.get_locked_tokens(
staker_address=self.checksum_address
)
return locked_tokens > 0 # TODO: Consider min stake size #1115
|
def _staker_is_really_staking(self) -> bool:
"""
This method assumes the stamp's signature is valid and accurate.
As a follow-up, this checks that the staker is, indeed, staking.
"""
locked_tokens = self.staking_agent.get_locked_tokens(
staker_address=self.checksum_address
)
return locked_tokens > 0 # TODO: Consider min stake size #1115
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def validate_worker(self, registry: BaseContractRegistry = None) -> None:
# Federated
if self.federated_only:
message = (
"This node cannot be verified in this manner, "
"but is OK to use in federated mode if you "
"have reason to believe it is trustworthy."
)
raise self.WrongMode(message)
# Decentralized
else:
if self.__decentralized_identity_evidence is NOT_SIGNED:
raise self.StampNotSigned
# Off-chain signature verification
if not self._stamp_has_valid_signature_by_worker():
message = (
f"Invalid signature {self.__decentralized_identity_evidence.hex()} "
f"from worker {self.worker_address} for stamp {bytes(self.stamp).hex()} "
)
raise self.InvalidWorkerSignature(message)
# On-chain staking check, if registry is present
if registry:
if not self._worker_is_bonded_to_staker(
registry=registry
): # <-- Blockchain CALL
message = f"Worker {self.worker_address} is not bonded to staker {self.checksum_address}"
raise self.DetachedWorker(message)
if self._staker_is_really_staking(registry=registry): # <-- Blockchain CALL
self.verified_worker = True
else:
raise self.NotStaking(f"Staker {self.checksum_address} is not staking")
self.verified_stamp = True
|
def validate_worker(self, verify_staking: bool = True) -> None:
# Federated
if self.federated_only:
message = (
"This node cannot be verified in this manner, "
"but is OK to use in federated mode if you "
"have reason to believe it is trustworthy."
)
raise self.WrongMode(message)
# Decentralized
else:
if self.__decentralized_identity_evidence is NOT_SIGNED:
raise self.StampNotSigned
# Off-chain signature verification
if not self._stamp_has_valid_signature_by_worker():
message = (
f"Invalid signature {self.__decentralized_identity_evidence.hex()} "
f"from worker {self.worker_address} for stamp {bytes(self.stamp).hex()} "
)
raise self.InvalidWorkerSignature(message)
# On-chain staking check
if verify_staking:
if not self._worker_is_bonded_to_staker(): # <-- Blockchain CALL
message = f"Worker {self.worker_address} is not bonded to staker {self.checksum_address}"
raise self.DetachedWorker(message)
if self._staker_is_really_staking(): # <-- Blockchain CALL
self.verified_worker = True
else:
raise self.NotStaking(f"Staker {self.checksum_address} is not staking")
self.verified_stamp = True
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def validate_metadata(self, registry: BaseContractRegistry = None):
# Verify the interface signature
if not self.verified_interface:
self.validate_interface()
# Verify the identity evidence
if self.verified_stamp:
return
# Offline check of valid stamp signature by worker
try:
self.validate_worker(registry=registry)
except self.WrongMode:
if bool(registry):
raise
|
def validate_metadata(
self, accept_federated_only: bool = False, verify_staking: bool = True
):
# Verify the interface signature
if not self.verified_interface:
self.validate_interface()
# Verify the identity evidence
if self.verified_stamp:
return
# Offline check of valid stamp signature by worker
try:
self.validate_worker(verify_staking=verify_staking)
except self.WrongMode:
if not accept_federated_only:
raise
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def verify_node(
self,
network_middleware,
certificate_filepath: str = None,
force: bool = False,
registry: BaseContractRegistry = None,
) -> bool:
"""
Three things happening here:
* Verify that the stamp matches the address (raises InvalidNode is it's not valid,
or WrongMode if it's a federated mode and being verified as a decentralized node)
* Verify the interface signature (raises InvalidNode if not valid)
* Connect to the node, make sure that it's up, and that the signature and address we
checked are the same ones this node is using now. (raises InvalidNode if not valid;
also emits a specific warning depending on which check failed).
"""
if force:
self.verified_interface = False
self.verified_node = False
self.verified_stamp = False
self.verified_worker = False
if self.verified_node:
return True
if not registry and not self.federated_only: # TODO: # 466
self.log.debug(
"No registry provided for decentralized stranger node verification - "
"on-chain Staking verification will not be performed."
)
# This is both the stamp's client signature and interface metadata check; May raise InvalidNode
self.validate_metadata(registry=registry)
# The node's metadata is valid; let's be sure the interface is in order.
if not certificate_filepath:
if self.certificate_filepath is CERTIFICATE_NOT_SAVED:
raise TypeError("We haven't saved a certificate for this node yet.")
else:
certificate_filepath = self.certificate_filepath
response_data = network_middleware.node_information(
host=self.rest_interface.host,
port=self.rest_interface.port,
certificate_filepath=certificate_filepath,
)
version, node_bytes = self.version_splitter(response_data, return_remainder=True)
node_details = self.internal_splitter(node_bytes)
# TODO: #589 - check timestamp here.
verifying_keys_match = node_details["verifying_key"] == self.public_keys(
SigningPower
)
encrypting_keys_match = node_details["encrypting_key"] == self.public_keys(
DecryptingPower
)
addresses_match = node_details["public_address"] == self.canonical_public_address
evidence_matches = (
node_details["decentralized_identity_evidence"]
== self.__decentralized_identity_evidence
)
if not all(
(encrypting_keys_match, verifying_keys_match, addresses_match, evidence_matches)
):
# Failure
if not addresses_match:
self.log.warn(
"Wallet address swapped out. It appears that someone is trying to defraud this node."
)
if not verifying_keys_match:
self.log.warn(
"Verifying key swapped out. It appears that someone is impersonating this node."
)
# TODO: #355 - Optional reporting.
raise self.InvalidNode(
"Wrong cryptographic material for this node - something fishy going on."
)
else:
# Success
self.verified_node = True
|
def verify_node(
self,
network_middleware,
certificate_filepath: str = None,
accept_federated_only: bool = False,
force: bool = False,
) -> bool:
"""
Three things happening here:
* Verify that the stamp matches the address (raises InvalidNode is it's not valid,
or WrongMode if it's a federated mode and being verified as a decentralized node)
* Verify the interface signature (raises InvalidNode if not valid)
* Connect to the node, make sure that it's up, and that the signature and address we
checked are the same ones this node is using now. (raises InvalidNode if not valid;
also emits a specific warning depending on which check failed).
"""
if force:
self.verified_interface = False
self.verified_node = False
self.verified_stamp = False
self.verified_worker = False
if self.verified_node:
return True
# This is both the stamp's client signature and interface metadata check; May raise InvalidNode
self.validate_metadata(accept_federated_only=accept_federated_only)
# The node's metadata is valid; let's be sure the interface is in order.
if not certificate_filepath:
if self.certificate_filepath is CERTIFICATE_NOT_SAVED:
raise TypeError("We haven't saved a certificate for this node yet.")
else:
certificate_filepath = self.certificate_filepath
response_data = network_middleware.node_information(
host=self.rest_interface.host,
port=self.rest_interface.port,
certificate_filepath=certificate_filepath,
)
version, node_bytes = self.version_splitter(response_data, return_remainder=True)
node_details = self.internal_splitter(node_bytes)
# TODO: #589 - check timestamp here.
verifying_keys_match = node_details["verifying_key"] == self.public_keys(
SigningPower
)
encrypting_keys_match = node_details["encrypting_key"] == self.public_keys(
DecryptingPower
)
addresses_match = node_details["public_address"] == self.canonical_public_address
evidence_matches = (
node_details["decentralized_identity_evidence"]
== self.__decentralized_identity_evidence
)
if not all(
(encrypting_keys_match, verifying_keys_match, addresses_match, evidence_matches)
):
# Failure
if not addresses_match:
self.log.warn(
"Wallet address swapped out. It appears that someone is trying to defraud this node."
)
if not verifying_keys_match:
self.log.warn(
"Verifying key swapped out. It appears that someone is impersonating this node."
)
# TODO: #355 - Optional reporting.
raise self.InvalidNode(
"Wrong cryptographic material for this node - something fishy going on."
)
else:
# Success
self.verified_node = True
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def make_rest_app(
db_filepath: str, this_node, serving_domains, log=Logger("http-application-layer")
) -> Tuple:
forgetful_node_storage = ForgetfulNodeStorage(
federated_only=this_node.federated_only
)
from nucypher.keystore import keystore
from nucypher.keystore.db import Base
from sqlalchemy.engine import create_engine
log.info("Starting datastore {}".format(db_filepath))
# See: https://docs.sqlalchemy.org/en/rel_0_9/dialects/sqlite.html#connect-strings
if db_filepath:
db_uri = f"sqlite:///{db_filepath}"
else:
db_uri = "sqlite://" # TODO: Is this a sane default? See #667
engine = create_engine(db_uri)
Base.metadata.create_all(engine)
datastore = keystore.KeyStore(engine)
db_engine = engine
from nucypher.characters.lawful import Alice, Ursula
_alice_class = Alice
_node_class = Ursula
rest_app = Flask("ursula-service")
@rest_app.route("/public_information")
def public_information():
"""
REST endpoint for public keys and address.
"""
response = Response(
response=bytes(this_node), mimetype="application/octet-stream"
)
return response
@rest_app.route("/node_metadata", methods=["GET"])
def all_known_nodes():
headers = {"Content-Type": "application/octet-stream"}
if this_node.known_nodes.checksum is NO_KNOWN_NODES:
return Response(b"", headers=headers, status=204)
payload = this_node.known_nodes.snapshot()
ursulas_as_vbytes = (VariableLengthBytestring(n) for n in this_node.known_nodes)
ursulas_as_bytes = bytes().join(bytes(u) for u in ursulas_as_vbytes)
ursulas_as_bytes += VariableLengthBytestring(bytes(this_node))
payload += ursulas_as_bytes
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
@rest_app.route("/node_metadata", methods=["POST"])
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
log.debug(
"Learner already knew fleet state {}; doing nothing.".format(
learner_fleet_state
)
)
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
nodes = _node_class.batch_from_bytes(
request.data,
registry=this_node.registry,
federated_only=this_node.federated_only,
) # TODO: 466
# TODO: This logic is basically repeated in learn_from_teacher_node and remember_node.
# Let's find a better way. #555
for node in nodes:
if not set(serving_domains).intersection(set(node.serving_domains)):
continue # This node is not serving any of our domains.
if node in this_node.known_nodes:
if (
node.timestamp
<= this_node.known_nodes[node.checksum_address].timestamp
):
continue
@crosstown_traffic()
def learn_about_announced_nodes():
try:
certificate_filepath = (
forgetful_node_storage.store_node_certificate(
certificate=node.certificate
)
)
node.verify_node(
this_node.network_middleware,
registry=this_node.registry,
certificate_filepath=certificate_filepath,
)
# Suspicion
except node.SuspiciousActivity as e:
# TODO: Include data about caller?
# TODO: Account for possibility that stamp, rather than interface, was bad.
# TODO: Maybe also record the bytes representation separately to disk?
message = f"Suspicious Activity about {node}: {str(e)}. Announced via REST."
log.warn(message)
this_node.suspicious_activities_witnessed["vladimirs"].append(node)
except NodeSeemsToBeDown as e:
# This is a rather odd situation - this node *just* contacted us and asked to be verified. Where'd it go? Maybe a NAT problem?
log.info(
f"Node announced itself to us just now, but seems to be down: {node}. Response was {e}."
)
log.debug(f"Phantom node certificate: {node.certificate}")
# Async Sentinel
except Exception as e:
log.critical(
f"This exception really needs to be handled differently: {e}"
)
raise
# Believable
else:
log.info("Learned about previously unknown node: {}".format(node))
this_node.remember_node(node)
# TODO: Record new fleet state
# Cleanup
finally:
forgetful_node_storage.forget()
# TODO: What's the right status code here? 202? Different if we already knew about the node?
return all_known_nodes()
@rest_app.route("/consider_arrangement", methods=["POST"])
def consider_arrangement():
from nucypher.policy.policies import Arrangement
arrangement = Arrangement.from_bytes(request.data)
with ThreadedSession(db_engine) as session:
new_policy_arrangement = datastore.add_policy_arrangement(
arrangement.expiration.datetime(),
id=arrangement.id.hex().encode(),
alice_verifying_key=arrangement.alice.stamp,
session=session,
)
# TODO: Make the rest of this logic actually work - do something here
# to decide if this Arrangement is worth accepting.
headers = {"Content-Type": "application/octet-stream"}
# TODO: Make this a legit response #234.
return Response(
b"This will eventually be an actual acceptance of the arrangement.",
headers=headers,
)
@rest_app.route("/kFrag/<id_as_hex>", methods=["POST"])
def set_policy(id_as_hex):
"""
REST endpoint for setting a kFrag.
TODO: Instead of taking a Request, use the apistar typing system to type
a payload and validate / split it.
TODO: Validate that the kfrag being saved is pursuant to an approved
Policy (see #121).
"""
policy_message_kit = UmbralMessageKit.from_bytes(request.data)
alices_verifying_key = policy_message_kit.sender_verifying_key
alice = _alice_class.from_public_keys(verifying_key=alices_verifying_key)
try:
cleartext = this_node.verify_from(alice, policy_message_kit, decrypt=True)
except InvalidSignature:
# TODO: Perhaps we log this?
return Response(status_code=400)
kfrag = KFrag.from_bytes(cleartext)
if not kfrag.verify(signing_pubkey=alices_verifying_key):
raise InvalidSignature("{} is invalid".format(kfrag))
with ThreadedSession(db_engine) as session:
datastore.attach_kfrag_to_saved_arrangement(
alice, id_as_hex, kfrag, session=session
)
# TODO: Sign the arrangement here. #495
return "" # TODO: Return A 200, with whatever policy metadata.
@rest_app.route("/kFrag/<id_as_hex>", methods=["DELETE"])
def revoke_arrangement(id_as_hex):
"""
REST endpoint for revoking/deleting a KFrag from a node.
"""
from nucypher.policy.collections import Revocation
revocation = Revocation.from_bytes(request.data)
log.info(
"Received revocation: {} -- for arrangement {}".format(
bytes(revocation).hex(), id_as_hex
)
)
try:
with ThreadedSession(db_engine) as session:
# Verify the Notice was signed by Alice
policy_arrangement = datastore.get_policy_arrangement(
id_as_hex.encode(), session=session
)
alice_pubkey = UmbralPublicKey.from_bytes(
policy_arrangement.alice_verifying_key.key_data
)
# Check that the request is the same for the provided revocation
if id_as_hex != revocation.arrangement_id.hex():
log.debug(
"Couldn't identify an arrangement with id {}".format(id_as_hex)
)
return Response(status_code=400)
elif revocation.verify_signature(alice_pubkey):
datastore.del_policy_arrangement(
id_as_hex.encode(), session=session
)
except (NotFound, InvalidSignature) as e:
log.debug("Exception attempting to revoke: {}".format(e))
return Response(
response="KFrag not found or revocation signature is invalid.",
status=404,
)
else:
log.info("KFrag successfully removed.")
return Response(response="KFrag deleted!", status=200)
@rest_app.route("/kFrag/<id_as_hex>/reencrypt", methods=["POST"])
def reencrypt_via_rest(id_as_hex):
from nucypher.policy.collections import WorkOrder # Avoid circular import
arrangement_id = binascii.unhexlify(id_as_hex)
try:
with ThreadedSession(db_engine) as session:
policy_arrangement = datastore.get_policy_arrangement(
arrangement_id=id_as_hex.encode(), session=session
)
except NotFound:
return Response(response=arrangement_id, status=404)
kfrag_bytes = policy_arrangement.kfrag # Careful! :-)
verifying_key_bytes = policy_arrangement.alice_verifying_key.key_data
# TODO: Push this to a lower level. Perhaps to Ursula character? #619
kfrag = KFrag.from_bytes(kfrag_bytes)
alices_verifying_key = UmbralPublicKey.from_bytes(verifying_key_bytes)
alices_address = canonical_address_from_umbral_key(alices_verifying_key)
work_order = WorkOrder.from_rest_payload(
arrangement_id=arrangement_id,
rest_payload=request.data,
ursula=this_node,
alice_address=alices_address,
)
log.info(
f"Work Order from {work_order.bob}, signed {work_order.receipt_signature}"
)
cfrag_byte_stream = b""
for task in work_order.tasks:
# Ursula signs on top of Bob's signature of each task.
# Now both are committed to the same task. See #259.
reencryption_metadata = bytes(this_node.stamp(bytes(task.signature)))
capsule = task.capsule
capsule.set_correctness_keys(verifying=alices_verifying_key)
cfrag = pre.reencrypt(kfrag, capsule, metadata=reencryption_metadata)
log.info(f"Re-encrypting for {capsule}, made {cfrag}.")
# Finally, Ursula commits to her result
reencryption_signature = this_node.stamp(bytes(cfrag))
cfrag_byte_stream += (
VariableLengthBytestring(cfrag) + reencryption_signature
)
# TODO: Put this in Ursula's datastore
this_node._work_orders.append(work_order)
headers = {"Content-Type": "application/octet-stream"}
return Response(response=cfrag_byte_stream, headers=headers)
@rest_app.route("/treasure_map/<treasure_map_id>")
def provide_treasure_map(treasure_map_id):
headers = {"Content-Type": "application/octet-stream"}
treasure_map_index = bytes.fromhex(treasure_map_id)
try:
treasure_map = this_node.treasure_maps[treasure_map_index]
response = Response(bytes(treasure_map), headers=headers)
log.info(
"{} providing TreasureMap {}".format(
this_node.nickname, treasure_map_id
)
)
except KeyError:
log.info(
"{} doesn't have requested TreasureMap {}".format(
this_node.stamp, treasure_map_id
)
)
response = Response(
"No Treasure Map with ID {}".format(treasure_map_id),
status=404,
headers=headers,
)
return response
@rest_app.route("/treasure_map/<treasure_map_id>", methods=["POST"])
def receive_treasure_map(treasure_map_id):
from nucypher.policy.collections import TreasureMap
try:
treasure_map = TreasureMap.from_bytes(
bytes_representation=request.data, verify=True
)
except TreasureMap.InvalidSignature:
do_store = False
else:
do_store = treasure_map.public_id() == treasure_map_id
if do_store:
log.info("{} storing TreasureMap {}".format(this_node, treasure_map_id))
# TODO 341 - what if we already have this TreasureMap?
treasure_map_index = bytes.fromhex(treasure_map_id)
this_node.treasure_maps[treasure_map_index] = treasure_map
return Response(bytes(treasure_map), status=202)
else:
# TODO: Make this a proper 500 or whatever.
log.info("Bad TreasureMap ID; not storing {}".format(treasure_map_id))
assert False
@rest_app.route("/status")
def status():
headers = {"Content-Type": "text/html", "charset": "utf-8"}
previous_states = list(reversed(this_node.known_nodes.states.values()))[:5]
try:
content = status_template.render(
this_node=this_node,
known_nodes=this_node.known_nodes,
previous_states=previous_states,
domains=serving_domains,
version=nucypher.__version__,
)
except Exception as e:
log.debug("Template Rendering Exception: ".format(str(e)))
raise TemplateError(str(e)) from e
return Response(response=content, headers=headers)
return rest_app, datastore
|
def make_rest_app(
db_filepath: str, this_node, serving_domains, log=Logger("http-application-layer")
) -> Tuple:
forgetful_node_storage = ForgetfulNodeStorage(
federated_only=this_node.federated_only
)
from nucypher.keystore import keystore
from nucypher.keystore.db import Base
from sqlalchemy.engine import create_engine
log.info("Starting datastore {}".format(db_filepath))
# See: https://docs.sqlalchemy.org/en/rel_0_9/dialects/sqlite.html#connect-strings
if db_filepath:
db_uri = f"sqlite:///{db_filepath}"
else:
db_uri = "sqlite://" # TODO: Is this a sane default? See #667
engine = create_engine(db_uri)
Base.metadata.create_all(engine)
datastore = keystore.KeyStore(engine)
db_engine = engine
from nucypher.characters.lawful import Alice, Ursula
_alice_class = Alice
_node_class = Ursula
rest_app = Flask("ursula-service")
@rest_app.route("/public_information")
def public_information():
"""
REST endpoint for public keys and address.
"""
response = Response(
response=bytes(this_node), mimetype="application/octet-stream"
)
return response
@rest_app.route("/node_metadata", methods=["GET"])
def all_known_nodes():
headers = {"Content-Type": "application/octet-stream"}
if this_node.known_nodes.checksum is NO_KNOWN_NODES:
return Response(b"", headers=headers, status=204)
payload = this_node.known_nodes.snapshot()
ursulas_as_vbytes = (VariableLengthBytestring(n) for n in this_node.known_nodes)
ursulas_as_bytes = bytes().join(bytes(u) for u in ursulas_as_vbytes)
ursulas_as_bytes += VariableLengthBytestring(bytes(this_node))
payload += ursulas_as_bytes
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
@rest_app.route("/node_metadata", methods=["POST"])
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
log.debug(
"Learner already knew fleet state {}; doing nothing.".format(
learner_fleet_state
)
)
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
nodes = _node_class.batch_from_bytes(
request.data,
registry=this_node.registry,
federated_only=this_node.federated_only,
) # TODO: 466
# TODO: This logic is basically repeated in learn_from_teacher_node and remember_node.
# Let's find a better way. #555
for node in nodes:
if not set(serving_domains).intersection(set(node.serving_domains)):
continue # This node is not serving any of our domains.
if node in this_node.known_nodes:
if (
node.timestamp
<= this_node.known_nodes[node.checksum_address].timestamp
):
continue
@crosstown_traffic()
def learn_about_announced_nodes():
try:
certificate_filepath = (
forgetful_node_storage.store_node_certificate(
certificate=node.certificate
)
)
node.verify_node(
this_node.network_middleware,
accept_federated_only=this_node.federated_only, # TODO: 466
certificate_filepath=certificate_filepath,
)
# Suspicion
except node.SuspiciousActivity as e:
# TODO: Include data about caller?
# TODO: Account for possibility that stamp, rather than interface, was bad.
# TODO: Maybe also record the bytes representation separately to disk?
message = f"Suspicious Activity about {node}: {str(e)}. Announced via REST."
log.warn(message)
this_node.suspicious_activities_witnessed["vladimirs"].append(node)
except NodeSeemsToBeDown as e:
# This is a rather odd situation - this node *just* contacted us and asked to be verified. Where'd it go? Maybe a NAT problem?
log.info(
f"Node announced itself to us just now, but seems to be down: {node}. Response was {e}."
)
log.debug(f"Phantom node certificate: {node.certificate}")
# Async Sentinel
except Exception as e:
log.critical(
f"This exception really needs to be handled differently: {e}"
)
raise
# Believable
else:
log.info("Learned about previously unknown node: {}".format(node))
this_node.remember_node(node)
# TODO: Record new fleet state
# Cleanup
finally:
forgetful_node_storage.forget()
# TODO: What's the right status code here? 202? Different if we already knew about the node?
return all_known_nodes()
@rest_app.route("/consider_arrangement", methods=["POST"])
def consider_arrangement():
from nucypher.policy.policies import Arrangement
arrangement = Arrangement.from_bytes(request.data)
with ThreadedSession(db_engine) as session:
new_policy_arrangement = datastore.add_policy_arrangement(
arrangement.expiration.datetime(),
id=arrangement.id.hex().encode(),
alice_verifying_key=arrangement.alice.stamp,
session=session,
)
# TODO: Make the rest of this logic actually work - do something here
# to decide if this Arrangement is worth accepting.
headers = {"Content-Type": "application/octet-stream"}
# TODO: Make this a legit response #234.
return Response(
b"This will eventually be an actual acceptance of the arrangement.",
headers=headers,
)
@rest_app.route("/kFrag/<id_as_hex>", methods=["POST"])
def set_policy(id_as_hex):
"""
REST endpoint for setting a kFrag.
TODO: Instead of taking a Request, use the apistar typing system to type
a payload and validate / split it.
TODO: Validate that the kfrag being saved is pursuant to an approved
Policy (see #121).
"""
policy_message_kit = UmbralMessageKit.from_bytes(request.data)
alices_verifying_key = policy_message_kit.sender_verifying_key
alice = _alice_class.from_public_keys(verifying_key=alices_verifying_key)
try:
cleartext = this_node.verify_from(alice, policy_message_kit, decrypt=True)
except InvalidSignature:
# TODO: Perhaps we log this?
return Response(status_code=400)
kfrag = KFrag.from_bytes(cleartext)
if not kfrag.verify(signing_pubkey=alices_verifying_key):
raise InvalidSignature("{} is invalid".format(kfrag))
with ThreadedSession(db_engine) as session:
datastore.attach_kfrag_to_saved_arrangement(
alice, id_as_hex, kfrag, session=session
)
# TODO: Sign the arrangement here. #495
return "" # TODO: Return A 200, with whatever policy metadata.
@rest_app.route("/kFrag/<id_as_hex>", methods=["DELETE"])
def revoke_arrangement(id_as_hex):
"""
REST endpoint for revoking/deleting a KFrag from a node.
"""
from nucypher.policy.collections import Revocation
revocation = Revocation.from_bytes(request.data)
log.info(
"Received revocation: {} -- for arrangement {}".format(
bytes(revocation).hex(), id_as_hex
)
)
try:
with ThreadedSession(db_engine) as session:
# Verify the Notice was signed by Alice
policy_arrangement = datastore.get_policy_arrangement(
id_as_hex.encode(), session=session
)
alice_pubkey = UmbralPublicKey.from_bytes(
policy_arrangement.alice_verifying_key.key_data
)
# Check that the request is the same for the provided revocation
if id_as_hex != revocation.arrangement_id.hex():
log.debug(
"Couldn't identify an arrangement with id {}".format(id_as_hex)
)
return Response(status_code=400)
elif revocation.verify_signature(alice_pubkey):
datastore.del_policy_arrangement(
id_as_hex.encode(), session=session
)
except (NotFound, InvalidSignature) as e:
log.debug("Exception attempting to revoke: {}".format(e))
return Response(
response="KFrag not found or revocation signature is invalid.",
status=404,
)
else:
log.info("KFrag successfully removed.")
return Response(response="KFrag deleted!", status=200)
@rest_app.route("/kFrag/<id_as_hex>/reencrypt", methods=["POST"])
def reencrypt_via_rest(id_as_hex):
from nucypher.policy.collections import WorkOrder # Avoid circular import
arrangement_id = binascii.unhexlify(id_as_hex)
try:
with ThreadedSession(db_engine) as session:
policy_arrangement = datastore.get_policy_arrangement(
arrangement_id=id_as_hex.encode(), session=session
)
except NotFound:
return Response(response=arrangement_id, status=404)
kfrag_bytes = policy_arrangement.kfrag # Careful! :-)
verifying_key_bytes = policy_arrangement.alice_verifying_key.key_data
# TODO: Push this to a lower level. Perhaps to Ursula character? #619
kfrag = KFrag.from_bytes(kfrag_bytes)
alices_verifying_key = UmbralPublicKey.from_bytes(verifying_key_bytes)
alices_address = canonical_address_from_umbral_key(alices_verifying_key)
work_order = WorkOrder.from_rest_payload(
arrangement_id=arrangement_id,
rest_payload=request.data,
ursula=this_node,
alice_address=alices_address,
)
log.info(
f"Work Order from {work_order.bob}, signed {work_order.receipt_signature}"
)
cfrag_byte_stream = b""
for task in work_order.tasks:
# Ursula signs on top of Bob's signature of each task.
# Now both are committed to the same task. See #259.
reencryption_metadata = bytes(this_node.stamp(bytes(task.signature)))
capsule = task.capsule
capsule.set_correctness_keys(verifying=alices_verifying_key)
cfrag = pre.reencrypt(kfrag, capsule, metadata=reencryption_metadata)
log.info(f"Re-encrypting for {capsule}, made {cfrag}.")
# Finally, Ursula commits to her result
reencryption_signature = this_node.stamp(bytes(cfrag))
cfrag_byte_stream += (
VariableLengthBytestring(cfrag) + reencryption_signature
)
# TODO: Put this in Ursula's datastore
this_node._work_orders.append(work_order)
headers = {"Content-Type": "application/octet-stream"}
return Response(response=cfrag_byte_stream, headers=headers)
@rest_app.route("/treasure_map/<treasure_map_id>")
def provide_treasure_map(treasure_map_id):
headers = {"Content-Type": "application/octet-stream"}
treasure_map_index = bytes.fromhex(treasure_map_id)
try:
treasure_map = this_node.treasure_maps[treasure_map_index]
response = Response(bytes(treasure_map), headers=headers)
log.info(
"{} providing TreasureMap {}".format(
this_node.nickname, treasure_map_id
)
)
except KeyError:
log.info(
"{} doesn't have requested TreasureMap {}".format(
this_node.stamp, treasure_map_id
)
)
response = Response(
"No Treasure Map with ID {}".format(treasure_map_id),
status=404,
headers=headers,
)
return response
@rest_app.route("/treasure_map/<treasure_map_id>", methods=["POST"])
def receive_treasure_map(treasure_map_id):
from nucypher.policy.collections import TreasureMap
try:
treasure_map = TreasureMap.from_bytes(
bytes_representation=request.data, verify=True
)
except TreasureMap.InvalidSignature:
do_store = False
else:
do_store = treasure_map.public_id() == treasure_map_id
if do_store:
log.info("{} storing TreasureMap {}".format(this_node, treasure_map_id))
# TODO 341 - what if we already have this TreasureMap?
treasure_map_index = bytes.fromhex(treasure_map_id)
this_node.treasure_maps[treasure_map_index] = treasure_map
return Response(bytes(treasure_map), status=202)
else:
# TODO: Make this a proper 500 or whatever.
log.info("Bad TreasureMap ID; not storing {}".format(treasure_map_id))
assert False
@rest_app.route("/status")
def status():
headers = {"Content-Type": "text/html", "charset": "utf-8"}
previous_states = list(reversed(this_node.known_nodes.states.values()))[:5]
try:
content = status_template.render(
this_node=this_node,
known_nodes=this_node.known_nodes,
previous_states=previous_states,
domains=serving_domains,
version=nucypher.__version__,
)
except Exception as e:
log.debug("Template Rendering Exception: ".format(str(e)))
raise TemplateError(str(e)) from e
return Response(response=content, headers=headers)
return rest_app, datastore
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
log.debug(
"Learner already knew fleet state {}; doing nothing.".format(
learner_fleet_state
)
)
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
nodes = _node_class.batch_from_bytes(
request.data,
registry=this_node.registry,
federated_only=this_node.federated_only,
) # TODO: 466
# TODO: This logic is basically repeated in learn_from_teacher_node and remember_node.
# Let's find a better way. #555
for node in nodes:
if not set(serving_domains).intersection(set(node.serving_domains)):
continue # This node is not serving any of our domains.
if node in this_node.known_nodes:
if node.timestamp <= this_node.known_nodes[node.checksum_address].timestamp:
continue
@crosstown_traffic()
def learn_about_announced_nodes():
try:
certificate_filepath = forgetful_node_storage.store_node_certificate(
certificate=node.certificate
)
node.verify_node(
this_node.network_middleware,
registry=this_node.registry,
certificate_filepath=certificate_filepath,
)
# Suspicion
except node.SuspiciousActivity as e:
# TODO: Include data about caller?
# TODO: Account for possibility that stamp, rather than interface, was bad.
# TODO: Maybe also record the bytes representation separately to disk?
message = (
f"Suspicious Activity about {node}: {str(e)}. Announced via REST."
)
log.warn(message)
this_node.suspicious_activities_witnessed["vladimirs"].append(node)
except NodeSeemsToBeDown as e:
# This is a rather odd situation - this node *just* contacted us and asked to be verified. Where'd it go? Maybe a NAT problem?
log.info(
f"Node announced itself to us just now, but seems to be down: {node}. Response was {e}."
)
log.debug(f"Phantom node certificate: {node.certificate}")
# Async Sentinel
except Exception as e:
log.critical(
f"This exception really needs to be handled differently: {e}"
)
raise
# Believable
else:
log.info("Learned about previously unknown node: {}".format(node))
this_node.remember_node(node)
# TODO: Record new fleet state
# Cleanup
finally:
forgetful_node_storage.forget()
# TODO: What's the right status code here? 202? Different if we already knew about the node?
return all_known_nodes()
|
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
log.debug(
"Learner already knew fleet state {}; doing nothing.".format(
learner_fleet_state
)
)
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
nodes = _node_class.batch_from_bytes(
request.data,
registry=this_node.registry,
federated_only=this_node.federated_only,
) # TODO: 466
# TODO: This logic is basically repeated in learn_from_teacher_node and remember_node.
# Let's find a better way. #555
for node in nodes:
if not set(serving_domains).intersection(set(node.serving_domains)):
continue # This node is not serving any of our domains.
if node in this_node.known_nodes:
if node.timestamp <= this_node.known_nodes[node.checksum_address].timestamp:
continue
@crosstown_traffic()
def learn_about_announced_nodes():
try:
certificate_filepath = forgetful_node_storage.store_node_certificate(
certificate=node.certificate
)
node.verify_node(
this_node.network_middleware,
accept_federated_only=this_node.federated_only, # TODO: 466
certificate_filepath=certificate_filepath,
)
# Suspicion
except node.SuspiciousActivity as e:
# TODO: Include data about caller?
# TODO: Account for possibility that stamp, rather than interface, was bad.
# TODO: Maybe also record the bytes representation separately to disk?
message = (
f"Suspicious Activity about {node}: {str(e)}. Announced via REST."
)
log.warn(message)
this_node.suspicious_activities_witnessed["vladimirs"].append(node)
except NodeSeemsToBeDown as e:
# This is a rather odd situation - this node *just* contacted us and asked to be verified. Where'd it go? Maybe a NAT problem?
log.info(
f"Node announced itself to us just now, but seems to be down: {node}. Response was {e}."
)
log.debug(f"Phantom node certificate: {node.certificate}")
# Async Sentinel
except Exception as e:
log.critical(
f"This exception really needs to be handled differently: {e}"
)
raise
# Believable
else:
log.info("Learned about previously unknown node: {}".format(node))
this_node.remember_node(node)
# TODO: Record new fleet state
# Cleanup
finally:
forgetful_node_storage.forget()
# TODO: What's the right status code here? 202? Different if we already knew about the node?
return all_known_nodes()
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def learn_about_announced_nodes():
try:
certificate_filepath = forgetful_node_storage.store_node_certificate(
certificate=node.certificate
)
node.verify_node(
this_node.network_middleware,
registry=this_node.registry,
certificate_filepath=certificate_filepath,
)
# Suspicion
except node.SuspiciousActivity as e:
# TODO: Include data about caller?
# TODO: Account for possibility that stamp, rather than interface, was bad.
# TODO: Maybe also record the bytes representation separately to disk?
message = f"Suspicious Activity about {node}: {str(e)}. Announced via REST."
log.warn(message)
this_node.suspicious_activities_witnessed["vladimirs"].append(node)
except NodeSeemsToBeDown as e:
# This is a rather odd situation - this node *just* contacted us and asked to be verified. Where'd it go? Maybe a NAT problem?
log.info(
f"Node announced itself to us just now, but seems to be down: {node}. Response was {e}."
)
log.debug(f"Phantom node certificate: {node.certificate}")
# Async Sentinel
except Exception as e:
log.critical(f"This exception really needs to be handled differently: {e}")
raise
# Believable
else:
log.info("Learned about previously unknown node: {}".format(node))
this_node.remember_node(node)
# TODO: Record new fleet state
# Cleanup
finally:
forgetful_node_storage.forget()
|
def learn_about_announced_nodes():
try:
certificate_filepath = forgetful_node_storage.store_node_certificate(
certificate=node.certificate
)
node.verify_node(
this_node.network_middleware,
accept_federated_only=this_node.federated_only, # TODO: 466
certificate_filepath=certificate_filepath,
)
# Suspicion
except node.SuspiciousActivity as e:
# TODO: Include data about caller?
# TODO: Account for possibility that stamp, rather than interface, was bad.
# TODO: Maybe also record the bytes representation separately to disk?
message = f"Suspicious Activity about {node}: {str(e)}. Announced via REST."
log.warn(message)
this_node.suspicious_activities_witnessed["vladimirs"].append(node)
except NodeSeemsToBeDown as e:
# This is a rather odd situation - this node *just* contacted us and asked to be verified. Where'd it go? Maybe a NAT problem?
log.info(
f"Node announced itself to us just now, but seems to be down: {node}. Response was {e}."
)
log.debug(f"Phantom node certificate: {node.certificate}")
# Async Sentinel
except Exception as e:
log.critical(f"This exception really needs to be handled differently: {e}")
raise
# Believable
else:
log.info("Learned about previously unknown node: {}".format(node))
this_node.remember_node(node)
# TODO: Record new fleet state
# Cleanup
finally:
forgetful_node_storage.forget()
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def consider_arrangement(self, network_middleware, ursula, arrangement) -> bool:
try:
ursula.verify_node(
network_middleware, registry=self.alice.registry
) # From the perspective of alice.
except ursula.InvalidNode:
# TODO: What do we actually do here? Report this at least (355)?
# Maybe also have another bucket for invalid nodes?
# It's possible that nothing sordid is happening here;
# this node may be updating its interface info or rotating a signing key
# and we learned about a previous one.
raise
negotiation_response = network_middleware.consider_arrangement(
arrangement=arrangement
)
# TODO: check out the response: need to assess the result and see if we're actually good to go.
arrangement_is_accepted = negotiation_response.status_code == 200
bucket = (
self._accepted_arrangements
if arrangement_is_accepted
else self._rejected_arrangements
)
bucket.add(arrangement)
return arrangement_is_accepted
|
def consider_arrangement(self, network_middleware, ursula, arrangement) -> bool:
try:
ursula.verify_node(
network_middleware, accept_federated_only=arrangement.federated
)
except ursula.InvalidNode:
# TODO: What do we actually do here? Report this at least (355)?
# Maybe also have another bucket for invalid nodes?
# It's possible that nothing sordid is happening here;
# this node may be updating its interface info or rotating a signing key
# and we learned about a previous one.
raise
negotiation_response = network_middleware.consider_arrangement(
arrangement=arrangement
)
# TODO: check out the response: need to assess the result and see if we're actually good to go.
arrangement_is_accepted = negotiation_response.status_code == 200
bucket = (
self._accepted_arrangements
if arrangement_is_accepted
else self._rejected_arrangements
)
bucket.add(arrangement)
return arrangement_is_accepted
|
https://github.com/nucypher/nucypher/issues/1279
|
$ nucypher ursula confirm-activity
,ggg, gg
dP""Y8a 88 ,dPYb,
Yb, `88 88 IP'`Yb
`" 88 88 I8 8I
88 88 I8 8'
88 88 ,gggggg, ,g, gg gg I8 dP ,gggg,gg
88 88 dP""""8I ,8'8, I8 8I I8dP dP" "Y8I
88 88 ,8' 8I ,8' Yb I8, ,8I I8P i8' ,8I
Y8b,____,d88,,dP Y8,,8'_ 8) ,d8b, ,d8b,,d8b,_ ,d8, ,d8b,
"Y888888P"Y88P `Y8P' "YY8P8P8P'"Y88P"`Y88P'"Y88P"Y8888P"`Y8
the Untrusted Re-Encryption Proxy.
Decrypting NuCypher keyring...
WARNING - No Bootnodes Available
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/config.py", line 219, in wrapper
return func(config, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 268, in ursula
client_password=client_password)
File "/home/ubuntu/nucypher/nucypher/cli/actions.py", line 288, in make_cli_character
**config_args)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 215, in __call__
return self.produce(**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 103, in produce
merged_parameters = self.generate_parameters(**overrides)
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 284, in generate_parameters
merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
File "/home/ubuntu/nucypher/nucypher/config/characters.py", line 98, in dynamic_payload
return {**super().dynamic_payload, **payload}
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 385, in dynamic_payload
self.read_known_nodes() # Requires a connected blockchain to init Ursulas.
File "/home/ubuntu/nucypher/nucypher/config/node.py", line 260, in read_known_nodes
known_nodes = self.node_storage.all(federated_only=self.federated_only)
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 415, in all
federated_only=federated_only) # TODO: 466
File "/home/ubuntu/nucypher/nucypher/config/storages.py", line 384, in __read_metadata
node = Ursula.from_bytes(node_bytes, registry=registry, federated_only=federated_only)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 1175, in from_bytes
ursula = cls.from_public_keys(registry=registry, federated_only=federated_only, **node_info)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 310, in from_public_keys
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/lawful.py", line 831, in __init__
**character_kwargs)
File "/home/ubuntu/nucypher/nucypher/characters/base.py", line 116, in __init__
raise ValueError(f"Pass either federated only or registry. Got '{federated_only}'. '{registry}'")
ValueError: Pass either federated only or registry. Got 'None'. 'None'
Sentry is attempting to send 0 pending error messages
Waiting up to 2.0 seconds
Press Ctrl-C to quit
|
ValueError
|
def ursula(
click_config,
action,
dev,
quiet,
dry_run,
force,
lonely,
network,
teacher_uri,
enode,
min_stake,
rest_host,
rest_port,
db_filepath,
checksum_address,
withdraw_address,
federated_only,
poa,
config_root,
config_file,
provider_uri,
geth,
recompile_solidity,
no_registry,
registry_filepath,
value,
duration,
index,
list_,
divide,
) -> None:
"""
Manage and run an "Ursula" PRE node.
\b
Actions
-------------------------------------------------
\b
init Create a new Ursula node configuration.
view View the Ursula node's configuration.
run Run an "Ursula" node.
save-metadata Manually write node metadata to disk without running
forget Forget all known nodes.
destroy Delete Ursula node configuration.
stake Manage stakes for this node.
confirm-activity Manually confirm-activity for the current period.
collect-reward Withdraw staking reward.
"""
# Validate
if federated_only and geth:
raise click.BadOptionUsage(
option_name="--geth",
message="Federated only cannot be used with the --geth flag",
)
if click_config.debug and quiet:
raise click.BadOptionUsage(
option_name="quiet",
message="--debug and --quiet cannot be used at the same time.",
)
#
# Boring Setup Stuff
#
# Stage integrated ethereum node process TODO: Only devnet for now
ETH_NODE = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
if geth:
ETH_NODE = NuCypherGethDevnetProcess(config_root=config_root)
provider_uri = ETH_NODE.provider_uri
if not click_config.json_ipc and not click_config.quiet:
click.secho(URSULA_BANNER.format(checksum_address or ""))
#
# Pre-Launch Warnings
#
if not click_config.quiet:
if dev:
click.secho("WARNING: Running in Development mode", fg="yellow")
if force:
click.secho("WARNING: Force is enabled", fg="yellow")
#
# Unauthenticated & Un-configured Ursula Configuration
#
if action == "init":
"""Create a brand-new persistent Ursula"""
if dev:
raise click.BadArgumentUsage(
"Cannot create a persistent development character"
)
if not config_root: # Flag
config_root = click_config.config_file # Envvar
# Attempts to automatically get the external IP from ifconfig.me
# If the request fails, it falls back to the standard process.
if not rest_host:
rest_host = actions.determine_external_ip_address(force=force)
new_password = click_config.get_password(confirm=True)
ursula_config = UrsulaConfiguration.generate(
password=new_password,
config_root=config_root,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
domains={network} if network else None,
federated_only=federated_only,
checksum_public_address=checksum_address,
download_registry=federated_only or no_registry,
registry_filepath=registry_filepath,
provider_process=ETH_NODE,
provider_uri=provider_uri,
poa=poa,
)
painting.paint_new_installation_help(
new_configuration=ursula_config,
config_root=config_root,
config_file=config_file,
federated_only=federated_only,
)
return
#
# Generate Configuration
#
# Development Configuration
if dev:
# TODO: Spawn POA development blockchain with geth --dev
# dev_geth_process = NuCypherGethDevProcess()
# dev_geth_process.deploy()
# dev_geth_process.start()
# ETH_NODE = dev_geth_process
# provider_uri = ETH_NODE.provider_uri
ursula_config = UrsulaConfiguration(
dev_mode=True,
domains={TEMPORARY_DOMAIN},
poa=poa,
registry_filepath=registry_filepath,
provider_process=ETH_NODE,
provider_uri=provider_uri,
checksum_public_address=checksum_address,
federated_only=federated_only,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
)
# Production Configurations
else:
# Domains -> bytes | or default
domains = set(bytes(network, encoding="utf-8")) if network else None
# Load Ursula from Configuration File
try:
ursula_config = UrsulaConfiguration.from_configuration_file(
filepath=config_file,
domains=domains,
registry_filepath=registry_filepath,
provider_process=ETH_NODE,
provider_uri=provider_uri,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
poa=poa,
federated_only=federated_only,
)
except FileNotFoundError:
return actions.handle_missing_configuration_file(
character_config_class=UrsulaConfiguration, config_file=config_file
)
except Exception as e:
if click_config.debug:
raise
else:
click.secho(str(e), fg="red", bold=True)
raise click.Abort
#
# Configured Pre-Authentication Actions
#
# Handle destruction *before* network bootstrap and character initialization below
if action == "destroy":
"""Delete all configuration files from the disk"""
if dev:
message = "'nucypher ursula destroy' cannot be used in --dev mode - There is nothing to destroy."
raise click.BadOptionUsage(option_name="--dev", message=message)
return actions.destroy_configuration(
character_config=ursula_config, force=force
)
#
# Connect to Blockchain
#
if not ursula_config.federated_only:
click_config.connect_to_blockchain(
character_configuration=ursula_config,
recompile_contracts=recompile_solidity,
)
click_config.ursula_config = (
ursula_config # Pass Ursula's config onto staking sub-command
)
#
# Authenticate
#
if dev:
# Development accounts are always unlocked and use one-time random keys.
password = None
else:
password = click_config.get_password()
click_config.unlock_keyring(
character_configuration=ursula_config, password=password
)
#
# Launch Warnings
#
if ursula_config.federated_only:
click_config.emit(message="WARNING: Running in Federated mode", color="yellow")
#
# Seed
#
teacher_nodes = actions.load_seednodes(
teacher_uris=[teacher_uri] if teacher_uri else None,
min_stake=min_stake,
federated_only=ursula_config.federated_only,
network_domains=ursula_config.domains,
network_middleware=click_config.middleware,
)
# Add ETH Bootnode or Peer
if enode:
if geth:
ursula_config.blockchain.interface.w3.geth.admin.addPeer(enode)
click.secho(f"Added ethereum peer {enode}")
else:
raise NotImplemented # TODO: other backends
#
# Produce
#
URSULA = ursula_config(password=password, known_nodes=teacher_nodes, lonely=lonely)
del password # ... under the rug
#
# Authenticated Action Switch
#
if action == "run":
"""Seed, Produce, Run!"""
# GO!
try:
click_config.emit(
message="Starting Ursula on {}".format(URSULA.rest_interface),
color="green",
bold=True,
)
# Ursula Deploy Warnings
click_config.emit(
message="Connecting to {}".format(
",".join(str(d, encoding="utf-8") for d in ursula_config.domains)
),
color="green",
bold=True,
)
if not URSULA.federated_only and URSULA.stakes:
click_config.emit(
message=f"Staking {str(URSULA.current_stake)} ~ Keep Ursula Online!",
color="blue",
bold=True,
)
if not click_config.debug:
stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))
if dry_run:
return # <-- ABORT - (Last Chance)
# Run - Step 3
node_deployer = URSULA.get_deployer()
node_deployer.addServices()
node_deployer.catalogServers(node_deployer.hendrix)
node_deployer.run() # <--- Blocking Call (Reactor)
# Handle Crash
except Exception as e:
ursula_config.log.critical(str(e))
click_config.emit(
message="{} {}".format(e.__class__.__name__, str(e)),
color="red",
bold=True,
)
raise # Crash :-(
# Graceful Exit / Crash
finally:
click_config.emit(message="Stopping Ursula", color="green")
ursula_config.cleanup()
click_config.emit(message="Ursula Stopped", color="red")
return
elif action == "save-metadata":
"""Manually save a node self-metadata file"""
metadata_path = ursula.write_node_metadata(node=URSULA)
return click_config.emit(
message="Successfully saved node metadata to {}.".format(metadata_path),
color="green",
)
elif action == "view":
"""Paint an existing configuration to the console"""
if not URSULA.federated_only:
click.secho("BLOCKCHAIN ----------\n")
painting.paint_contract_status(
click_config=click_config, ursula_config=ursula_config
)
current_block = URSULA.blockchain.interface.w3.eth.blockNumber
click.secho(f"Block # {current_block}")
click.secho(f"NU Balance: {URSULA.token_balance}")
click.secho(f"ETH Balance: {URSULA.eth_balance}")
click.secho(
f"Current Gas Price {URSULA.blockchain.interface.w3.eth.gasPrice}"
)
# TODO: Verbose status
# click.secho(f'{URSULA.blockchain.interface.w3.eth.getBlock(current_block)}')
click.secho("CONFIGURATION --------")
response = UrsulaConfiguration._read_configuration_file(
filepath=config_file or ursula_config.config_file_location
)
return click_config.emit(response=response)
elif action == "forget":
actions.forget(configuration=ursula_config)
return
elif action == "stake":
# List Only
if list_:
if not URSULA.stakes:
click.echo(
f"There are no active stakes for {URSULA.checksum_public_address}"
)
else:
painting.paint_stakes(stakes=URSULA.stakes)
return
# Divide Only
if divide:
"""Divide an existing stake by specifying the new target value and end period"""
# Validate
if not URSULA.stakes:
click.echo(
f"There are no active stakes for {URSULA.checksum_public_address}"
)
return
# Selection
if index is None:
painting.paint_stakes(stakes=URSULA.stakes)
index = click.prompt(
"Select a stake to divide",
type=click.IntRange(min=0, max=len(URSULA.stakes) - 1),
)
# Lookup the stake
current_stake = URSULA.stakes[index]
# Value
if not value:
value = click.prompt(
f"Enter target value (must be less than {str(current_stake.value)})",
type=STAKE_VALUE,
)
value = NU(value, "NU")
# Duration
if not duration:
extension = click.prompt(
"Enter number of periods to extend", type=STAKE_EXTENSION
)
else:
extension = duration
if not force:
painting.paint_staged_stake_division(
ursula=URSULA,
original_index=index,
original_stake=current_stake,
target_value=value,
extension=extension,
)
click.confirm("Is this correct?", abort=True)
modified_stake, new_stake = URSULA.divide_stake(
stake_index=index, target_value=value, additional_periods=extension
)
if not quiet:
click.secho("Successfully divided stake", fg="green")
click.secho(f"Transaction Hash ........... {new_stake.receipt}")
# Show the resulting stake list
painting.paint_stakes(stakes=URSULA.stakes)
return
# Confirm new stake init
if not force:
click.confirm("Stage a new stake?", abort=True)
# Validate balance
balance = URSULA.token_balance
if balance == 0:
click.secho(f"{ursula.checksum_public_address} has 0 NU.")
raise click.Abort
if not quiet:
click.echo(f"Current balance: {balance}")
# Gather stake value
if not value:
min_locked = NU(URSULA.economics.minimum_allowed_locked, "NuNit")
value = click.prompt(
f"Enter stake value", type=STAKE_VALUE, default=min_locked
)
else:
value = NU(int(value), "NU")
# Duration
if not quiet:
message = (
f"Minimum duration: {URSULA.economics.minimum_allowed_locked} | "
f"Maximum Duration: {URSULA.economics.maximum_allowed_locked}"
)
click.echo(message)
if not duration:
duration = click.prompt(
"Enter stake duration in periods (1 Period = 24 Hours)",
type=STAKE_DURATION,
)
start_period = URSULA.miner_agent.get_current_period()
end_period = start_period + duration
# Review
if not force:
painting.paint_staged_stake(
ursula=URSULA,
stake_value=value,
duration=duration,
start_period=start_period,
end_period=end_period,
)
if not dev:
actions.confirm_staged_stake(
ursula=URSULA, value=value, duration=duration
)
# Last chance to bail
if not force:
click.confirm("Publish staged stake to the blockchain?", abort=True)
stake = URSULA.initialize_stake(amount=int(value), lock_periods=duration)
painting.paint_staking_confirmation(
ursula=URSULA, transactions=stake.transactions
)
return
elif action == "confirm-activity":
if not URSULA.stakes:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
URSULA.miner_agent.confirm_activity(node_address=URSULA.checksum_public_address)
return
elif action == "collect-reward":
"""Withdraw staking reward to the specified wallet address"""
if not force:
click.confirm(
f"Send {URSULA.calculate_reward()} to {URSULA.checksum_public_address}?"
)
URSULA.collect_policy_reward(
collector_address=withdraw_address or checksum_address
)
URSULA.collect_staking_reward()
else:
raise click.BadArgumentUsage("No such argument {}".format(action))
|
def ursula(
click_config,
action,
dev,
quiet,
dry_run,
force,
lonely,
network,
teacher_uri,
enode,
min_stake,
rest_host,
rest_port,
db_filepath,
checksum_address,
withdraw_address,
federated_only,
poa,
config_root,
config_file,
provider_uri,
geth,
recompile_solidity,
no_registry,
registry_filepath,
value,
duration,
index,
list_,
divide,
) -> None:
"""
Manage and run an "Ursula" PRE node.
\b
Actions
-------------------------------------------------
\b
init Create a new Ursula node configuration.
view View the Ursula node's configuration.
run Run an "Ursula" node.
save-metadata Manually write node metadata to disk without running
forget Forget all known nodes.
destroy Delete Ursula node configuration.
stake Manage stakes for this node.
confirm-activity Manually confirm-activity for the current period.
collect-reward Withdraw staking reward.
"""
# Validate
if federated_only and geth:
raise click.BadOptionUsage(
option_name="--geth",
message="Federated only cannot be used with the --geth flag",
)
if click_config.debug and quiet:
raise click.BadOptionUsage(
option_name="quiet",
message="--debug and --quiet cannot be used at the same time.",
)
#
# Boring Setup Stuff
#
# Stage integrated ethereum node process TODO: Only devnet for now
ETH_NODE = NO_BLOCKCHAIN_CONNECTION.bool_value(False)
if geth:
ETH_NODE = NuCypherGethDevnetProcess(config_root=config_root)
provider_uri = ETH_NODE.provider_uri
if not click_config.json_ipc and not click_config.quiet:
click.secho(URSULA_BANNER.format(checksum_address or ""))
#
# Pre-Launch Warnings
#
if not click_config.quiet:
if dev:
click.secho("WARNING: Running in Development mode", fg="yellow")
if force:
click.secho("WARNING: Force is enabled", fg="yellow")
#
# Unauthenticated & Un-configured Ursula Configuration
#
if action == "init":
"""Create a brand-new persistent Ursula"""
if dev:
raise click.BadArgumentUsage(
"Cannot create a persistent development character"
)
if not config_root: # Flag
config_root = click_config.config_file # Envvar
# Attempts to automatically get the external IP from ifconfig.me
# If the request fails, it falls back to the standard process.
if not rest_host:
rest_host = actions.determine_external_ip_address(force=force)
new_password = click_config.get_password(confirm=True)
ursula_config = UrsulaConfiguration.generate(
password=new_password,
config_root=config_root,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
domains={network} if network else None,
federated_only=federated_only,
checksum_public_address=checksum_address,
download_registry=federated_only or no_registry,
registry_filepath=registry_filepath,
provider_process=ETH_NODE,
provider_uri=provider_uri,
poa=poa,
)
painting.paint_new_installation_help(
new_configuration=ursula_config,
config_root=config_root,
config_file=config_file,
federated_only=federated_only,
)
return
#
# Generate Configuration
#
# Development Configuration
if dev:
# TODO: Spawn POA development blockchain with geth --dev
# dev_geth_process = NuCypherGethDevProcess()
# dev_geth_process.deploy()
# dev_geth_process.start()
# ETH_NODE = dev_geth_process
# provider_uri = ETH_NODE.provider_uri
ursula_config = UrsulaConfiguration(
dev_mode=True,
domains={TEMPORARY_DOMAIN},
poa=poa,
registry_filepath=registry_filepath,
provider_process=ETH_NODE,
provider_uri=provider_uri,
checksum_public_address=checksum_address,
federated_only=federated_only,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
)
# Production Configurations
else:
# Domains -> bytes | or default
domains = set(bytes(network, encoding="utf-8")) if network else None
# Load Ursula from Configuration File
try:
ursula_config = UrsulaConfiguration.from_configuration_file(
filepath=config_file,
domains=domains,
registry_filepath=registry_filepath,
provider_process=ETH_NODE,
provider_uri=provider_uri,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
poa=poa,
federated_only=federated_only,
)
except FileNotFoundError:
return actions.handle_missing_configuration_file(
character_config_class=UrsulaConfiguration, config_file=config_file
)
except Exception as e:
if click_config.debug:
raise
else:
click.secho(str(e), fg="red", bold=True)
raise click.Abort
#
# Configured Pre-Authentication Actions
#
# Handle destruction *before* network bootstrap and character initialization below
if action == "destroy":
"""Delete all configuration files from the disk"""
if dev:
message = "'nucypher ursula destroy' cannot be used in --dev mode - There is nothing to destroy."
raise click.BadOptionUsage(option_name="--dev", message=message)
return actions.destroy_configuration(
character_config=ursula_config, force=force
)
#
# Connect to Blockchain
#
if not ursula_config.federated_only:
click_config.connect_to_blockchain(
character_configuration=ursula_config,
recompile_contracts=recompile_solidity,
)
click_config.ursula_config = (
ursula_config # Pass Ursula's config onto staking sub-command
)
#
# Authenticate
#
if dev:
# Development accounts are always unlocked and use one-time random keys.
password = None
else:
password = click_config.get_password()
click_config.unlock_keyring(
character_configuration=ursula_config, password=password
)
#
# Launch Warnings
#
if ursula_config.federated_only:
click_config.emit(message="WARNING: Running in Federated mode", color="yellow")
#
# Seed
#
teacher_nodes = actions.load_seednodes(
teacher_uris=[teacher_uri] if teacher_uri else None,
min_stake=min_stake,
federated_only=ursula_config.federated_only,
network_domains=ursula_config.domains,
network_middleware=click_config.middleware,
)
# Add ETH Bootnode or Peer
if enode:
if geth:
ursula_config.blockchain.interface.w3.geth.admin.addPeer(enode)
click.secho(f"Added ethereum peer {enode}")
else:
raise NotImplemented # TODO: other backends
#
# Produce
#
URSULA = ursula_config(password=password, known_nodes=teacher_nodes, lonely=lonely)
del password # ... under the rug
#
# Authenticated Action Switch
#
if action == "run":
"""Seed, Produce, Run!"""
# GO!
try:
click_config.emit(
message="Starting Ursula on {}".format(URSULA.rest_interface),
color="green",
bold=True,
)
# Ursula Deploy Warnings
click_config.emit(
message="Connecting to {}".format(
",".join(str(d, encoding="utf-8") for d in ursula_config.domains)
),
color="green",
bold=True,
)
if not URSULA.federated_only and URSULA.stakes:
click_config.emit(
message=f"Staking {str(URSULA.current_stake)} ~ Keep Ursula Online!",
color="blue",
bold=True,
)
if not click_config.debug:
stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))
if dry_run:
return # <-- ABORT - (Last Chance)
# Run - Step 3
node_deployer = URSULA.get_deployer()
node_deployer.addServices()
node_deployer.catalogServers(node_deployer.hendrix)
node_deployer.run() # <--- Blocking Call (Reactor)
# Handle Crash
except Exception as e:
ursula_config.log.critical(str(e))
click_config.emit(
message="{} {}".format(e.__class__.__name__, str(e)),
color="red",
bold=True,
)
raise # Crash :-(
# Graceful Exit / Crash
finally:
click_config.emit(message="Stopping Ursula", color="green")
ursula_config.cleanup()
click_config.emit(message="Ursula Stopped", color="red")
return
elif action == "save-metadata":
"""Manually save a node self-metadata file"""
metadata_path = ursula.write_node_metadata(node=URSULA)
return click_config.emit(
message="Successfully saved node metadata to {}.".format(metadata_path),
color="green",
)
elif action == "view":
"""Paint an existing configuration to the console"""
if not URSULA.federated_only:
click.secho("BLOCKCHAIN ----------\n")
painting.paint_contract_status(
click_config=click_config, ursula_config=ursula_config
)
current_block = URSULA.blockchain.interface.w3.eth.blockNumber
click.secho(f"Block # {current_block}")
click.secho(f"NU Balance: {URSULA.token_balance}")
click.secho(f"ETH Balance: {URSULA.eth_balance}")
click.secho(
f"Current Gas Price {URSULA.blockchain.interface.w3.eth.gasPrice}"
)
# TODO: Verbose status
# click.secho(f'{URSULA.blockchain.interface.w3.eth.getBlock(current_block)}')
click.secho("CONFIGURATION --------")
response = UrsulaConfiguration._read_configuration_file(
filepath=config_file or ursula_config.config_file_location
)
return click_config.emit(response=response)
elif action == "forget":
actions.forget(configuration=ursula_config)
return
elif action == "stake":
# List Only
if list_:
if not URSULA.stakes:
click.echo(
f"There are no existing stakes for {URSULA.checksum_public_address}"
)
painting.paint_stakes(stakes=URSULA.stakes)
return
# Divide Only
if divide:
"""Divide an existing stake by specifying the new target value and end period"""
# Validate
if len(URSULA.stakes) == 0:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
# Selection
if index is None:
painting.paint_stakes(stakes=URSULA.stakes)
index = click.prompt(
"Select a stake to divide",
type=click.IntRange(min=0, max=len(URSULA.stakes) - 1),
)
# Lookup the stake
current_stake = URSULA.stakes[index]
# Value
if not value:
value = click.prompt(
f"Enter target value (must be less than {str(current_stake.value)})",
type=STAKE_VALUE,
)
value = NU(value, "NU")
# Duration
if not duration:
extension = click.prompt(
"Enter number of periods to extend", type=STAKE_EXTENSION
)
else:
extension = duration
if not force:
painting.paint_staged_stake_division(
ursula=URSULA,
original_index=index,
original_stake=current_stake,
target_value=value,
extension=extension,
)
click.confirm("Is this correct?", abort=True)
modified_stake, new_stake = URSULA.divide_stake(
stake_index=index, target_value=value, additional_periods=extension
)
if not quiet:
click.secho("Successfully divided stake", fg="green")
click.secho(f"Transaction Hash ........... {new_stake.receipt}")
# Show the resulting stake list
painting.paint_stakes(stakes=URSULA.stakes)
return
# Confirm new stake init
if not force:
click.confirm("Stage a new stake?", abort=True)
# Validate balance
balance = URSULA.token_balance
if balance == 0:
click.secho(f"{ursula.checksum_public_address} has 0 NU.")
raise click.Abort
if not quiet:
click.echo(f"Current balance: {balance}")
# Gather stake value
if not value:
min_locked = NU(URSULA.economics.minimum_allowed_locked, "NuNit")
value = click.prompt(
f"Enter stake value", type=STAKE_VALUE, default=min_locked
)
else:
value = NU(int(value), "NU")
# Duration
if not quiet:
message = (
f"Minimum duration: {URSULA.economics.minimum_allowed_locked} | "
f"Maximum Duration: {URSULA.economics.maximum_allowed_locked}"
)
click.echo(message)
if not duration:
duration = click.prompt(
"Enter stake duration in periods (1 Period = 24 Hours)",
type=STAKE_DURATION,
)
start_period = URSULA.miner_agent.get_current_period()
end_period = start_period + duration
# Review
if not force:
painting.paint_staged_stake(
ursula=URSULA,
stake_value=value,
duration=duration,
start_period=start_period,
end_period=end_period,
)
if not dev:
actions.confirm_staged_stake(
ursula=URSULA, value=value, duration=duration
)
# Last chance to bail
if not force:
click.confirm("Publish staged stake to the blockchain?", abort=True)
stake = URSULA.initialize_stake(amount=int(value), lock_periods=duration)
painting.paint_staking_confirmation(
ursula=URSULA, transactions=stake.transactions
)
return
elif action == "confirm-activity":
if not URSULA.stakes:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
URSULA.miner_agent.confirm_activity(node_address=URSULA.checksum_public_address)
return
elif action == "collect-reward":
"""Withdraw staking reward to the specified wallet address"""
if not force:
click.confirm(
f"Send {URSULA.calculate_reward()} to {URSULA.checksum_public_address}?"
)
URSULA.collect_policy_reward(
collector_address=withdraw_address or checksum_address
)
URSULA.collect_staking_reward()
else:
raise click.BadArgumentUsage("No such argument {}".format(action))
|
https://github.com/nucypher/nucypher/issues/1025
|
Enter keyring password:
Decrypting NuCypher keyring...
Decrypting Ethereum Node Keyring...
No default teacher nodes exist for the specified network: b"b'goerli'"
WARNING - No Bootnodes Available
There are no existing stakes for 0xbE5A5646Ea2B6d47ea65dF2a13A1876997a348c4
| # | Duration | Enact | Expiration | Value
| - | ------------ | ----------- | -----------| -----
Traceback (most recent call last):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/nucypher/nucypher/cli/characters/ursula.py", line 405, in ursula
painting.paint_stakes(stakes=URSULA.stakes)
File "/home/ubuntu/nucypher/nucypher/cli/painting.py", line 263, in paint_stakes
for index, stake in enumerate(stakes):
File "/home/ubuntu/.local/share/virtualenvs/nucypher-fvStivKV/lib/python3.7/site-packages/constant_sorrow/constants.py", line 162, in __iter__
for item in self.__repr_content:
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def ursula(
click_config,
action,
dev,
quiet,
dry_run,
force,
lonely,
network,
teacher_uri,
min_stake,
rest_host,
rest_port,
db_filepath,
checksum_address,
withdraw_address,
federated_only,
poa,
config_root,
config_file,
provider_uri,
recompile_solidity,
no_registry,
registry_filepath,
value,
duration,
index,
list_,
divide,
) -> None:
"""
Manage and run an "Ursula" PRE node.
\b
Actions
-------------------------------------------------
\b
init Create a new Ursula node configuration.
view View the Ursula node's configuration.
run Run an "Ursula" node.
save-metadata Manually write node metadata to disk without running
forget Forget all known nodes.
destroy Delete Ursula node configuration.
stake Manage stakes for this node.
confirm-activity Manually confirm-activity for the current period.
collect-reward Withdraw staking reward.
"""
#
# Boring Setup Stuff
#
if not quiet:
log = Logger("ursula.cli")
if click_config.debug and quiet:
raise click.BadOptionUsage(
option_name="quiet",
message="--debug and --quiet cannot be used at the same time.",
)
if not click_config.json_ipc and not click_config.quiet:
click.secho(URSULA_BANNER.format(checksum_address or ""))
#
# Pre-Launch Warnings
#
if not click_config.quiet:
if dev:
click.secho("WARNING: Running in Development mode", fg="yellow")
if force:
click.secho("WARNING: Force is enabled", fg="yellow")
#
# Unauthenticated Configurations & Un-configured Ursula Control
#
if action == "init":
"""Create a brand-new persistent Ursula"""
if not network:
raise click.BadArgumentUsage(
"--network is required to initialize a new configuration."
)
if dev:
click_config.emitter(
message="WARNING: Using temporary storage area", color="yellow"
)
if not config_root: # Flag
config_root = click_config.config_file # Envvar
if not rest_host:
rest_host = click.prompt(
"Enter Ursula's public-facing IPv4 address"
) # TODO: Remove this step
ursula_config = UrsulaConfiguration.generate(
password=click_config.get_password(confirm=True),
config_root=config_root,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
domains={network} if network else None,
federated_only=federated_only,
checksum_public_address=checksum_address,
no_registry=federated_only or no_registry,
registry_filepath=registry_filepath,
provider_uri=provider_uri,
poa=poa,
)
painting.paint_new_installation_help(
new_configuration=ursula_config,
config_root=config_root,
config_file=config_file,
federated_only=federated_only,
)
return
#
# Configured Ursulas
#
# Development Configuration
if dev:
ursula_config = UrsulaConfiguration(
dev_mode=True,
domains={TEMPORARY_DOMAIN},
poa=poa,
registry_filepath=registry_filepath,
provider_uri=provider_uri,
checksum_public_address=checksum_address,
federated_only=federated_only,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
)
# Authenticated Configurations
else:
# Domains -> bytes | or default
domains = [bytes(network, encoding="utf-8")] if network else None
# Load Ursula from Configuration File
ursula_config = UrsulaConfiguration.from_configuration_file(
filepath=config_file,
domains=domains,
registry_filepath=registry_filepath,
provider_uri=provider_uri,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
poa=poa,
federated_only=federated_only,
)
click_config.unlock_keyring(character_configuration=ursula_config)
#
# Connect to Blockchain (Non-Federated)
#
if not ursula_config.federated_only:
click_config.connect_to_blockchain(
character_configuration=ursula_config,
recompile_contracts=recompile_solidity,
)
click_config.ursula_config = (
ursula_config # Pass Ursula's config onto staking sub-command
)
#
# Launch Warnings
#
if ursula_config.federated_only:
click_config.emitter(
message="WARNING: Running in Federated mode", color="yellow"
)
# Seed - Step 1
teacher_uris = [teacher_uri] if teacher_uri else list()
teacher_nodes = actions.load_seednodes(
teacher_uris=teacher_uris,
min_stake=min_stake,
federated_only=ursula_config.federated_only,
network_middleware=click_config.middleware,
)
# Produce - Step 2
URSULA = ursula_config(known_nodes=teacher_nodes, lonely=lonely)
#
# Action Switch
#
if action == "run":
"""Seed, Produce, Run!"""
# GO!
try:
click_config.emitter(
message="Starting Ursula on {}".format(URSULA.rest_interface),
color="green",
bold=True,
)
# Ursula Deploy Warnings
click_config.emitter(
message="Connecting to {}".format(
",".join(str(d, encoding="utf-8") for d in ursula_config.domains)
),
color="green",
bold=True,
)
if not URSULA.federated_only and URSULA.stakes:
click_config.emitter(
message=f"Staking {str(URSULA.total_staked)} ~ Keep Ursula Online!",
color="blue",
bold=True,
)
if not click_config.debug:
stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))
if dry_run:
return # <-- ABORT -X (Last Chance)
# Run - Step 3
node_deployer = URSULA.get_deployer()
node_deployer.addServices()
node_deployer.catalogServers(node_deployer.hendrix)
node_deployer.run() # <--- Blocking Call (Reactor)
# Handle Crash
except Exception as e:
ursula_config.log.critical(str(e))
click_config.emitter(
message="{} {}".format(e.__class__.__name__, str(e)),
color="red",
bold=True,
)
raise # Crash :-(
# Graceful Exit / Crash
finally:
click_config.emitter(message="Stopping Ursula", color="green")
ursula_config.cleanup()
click_config.emitter(message="Ursula Stopped", color="red")
return
elif action == "save-metadata":
"""Manually save a node self-metadata file"""
metadata_path = ursula.write_node_metadata(node=URSULA)
return click_config.emitter(
message="Successfully saved node metadata to {}.".format(metadata_path),
color="green",
)
elif action == "view":
"""Paint an existing configuration to the console"""
response = UrsulaConfiguration._read_configuration_file(
filepath=config_file or ursula_config.config_file_location
)
return click_config.emitter(response=response)
elif action == "forget":
actions.forget(configuration=ursula_config)
return
elif action == "destroy":
"""Delete all configuration files from the disk"""
if dev:
message = "'nucypher ursula destroy' cannot be used in --dev mode"
raise click.BadOptionUsage(option_name="--dev", message=message)
destroyed_filepath = destroy_system_configuration(
config_class=UrsulaConfiguration,
config_file=config_file,
network=network,
config_root=ursula_config.config_file_location,
force=force,
)
return click_config.emitter(
message=f"Destroyed {destroyed_filepath}", color="green"
)
elif action == "stake":
# List Only
if list_:
if not URSULA.stakes:
click.echo(
f"There are no existing stakes for {URSULA.checksum_public_address}"
)
painting.paint_stakes(stakes=URSULA.stakes)
return
# Divide Only
if divide:
"""Divide an existing stake by specifying the new target value and end period"""
# Validate
if len(URSULA.stakes) == 0:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
# Selection
if index is None:
painting.paint_stakes(stakes=URSULA.stakes)
index = click.prompt(
"Select a stake to divide",
type=click.IntRange(min=0, max=len(URSULA.stakes) - 1),
)
# Lookup the stake
current_stake = URSULA.stakes[index]
# Value
if not value:
value = click.prompt(
f"Enter target value (must be less than {str(current_stake.value)})",
type=STAKE_VALUE,
)
value = NU(value, "NU")
# Duration
if not duration:
extension = click.prompt(
"Enter number of periods to extend", type=STAKE_EXTENSION
)
else:
extension = duration
if not force:
painting.paint_staged_stake_division(
ursula=URSULA,
original_index=index,
original_stake=current_stake,
target_value=value,
extension=extension,
)
click.confirm("Is this correct?", abort=True)
txhash_bytes = URSULA.divide_stake(
stake_index=index, target_value=value, additional_periods=extension
)
if not quiet:
click.secho("Successfully divided stake", fg="green")
click.secho(f"Transaction Hash ........... {txhash_bytes.hex()}")
# Show the resulting stake list
painting.paint_stakes(stakes=URSULA.stakes)
return
# Confirm new stake init
if not force:
click.confirm("Stage a new stake?", abort=True)
# Validate balance
balance = URSULA.token_balance
if balance == 0:
click.secho(f"{ursula.checksum_public_address} has 0 NU.")
raise click.Abort
if not quiet:
click.echo(f"Current balance: {balance}")
# Gather stake value
if not value:
value = click.prompt(
f"Enter stake value",
type=STAKE_VALUE,
default=NU(MIN_ALLOWED_LOCKED, "NuNit"),
)
else:
value = NU(int(value), "NU")
# Duration
if not quiet:
message = "Minimum duration: {} | Maximum Duration: {}".format(
MIN_LOCKED_PERIODS, MAX_MINTING_PERIODS
)
click.echo(message)
if not duration:
duration = click.prompt(
"Enter stake duration in periods (1 Period = 24 Hours)",
type=STAKE_DURATION,
)
start_period = URSULA.miner_agent.get_current_period()
end_period = start_period + duration
# Review
if not force:
painting.paint_staged_stake(
ursula=URSULA,
stake_value=value,
duration=duration,
start_period=start_period,
end_period=end_period,
)
if not dev:
actions.confirm_staged_stake(
ursula=URSULA, value=value, duration=duration
)
# Last chance to bail
if not force:
click.confirm("Publish staged stake to the blockchain?", abort=True)
staking_transactions = URSULA.initialize_stake(
amount=int(value), lock_periods=duration
)
painting.paint_staking_confirmation(
ursula=URSULA, transactions=staking_transactions
)
return
elif action == "confirm-activity":
if not URSULA.stakes:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
URSULA.miner_agent.confirm_activity(node_address=URSULA.checksum_public_address)
return
elif action == "collect-reward":
"""Withdraw staking reward to the specified wallet address"""
if not force:
click.confirm(
f"Send {URSULA.calculate_reward()} to {URSULA.checksum_public_address}?"
)
URSULA.collect_policy_reward(
collector_address=withdraw_address or checksum_address
)
URSULA.collect_staking_reward()
else:
raise click.BadArgumentUsage("No such argument {}".format(action))
|
def ursula(
click_config,
action,
dev,
quiet,
dry_run,
force,
lonely,
network,
teacher_uri,
min_stake,
rest_host,
rest_port,
db_filepath,
checksum_address,
withdraw_address,
federated_only,
poa,
config_root,
config_file,
provider_uri,
recompile_solidity,
no_registry,
registry_filepath,
value,
duration,
index,
list_,
divide,
) -> None:
"""
Manage and run an "Ursula" PRE node.
\b
Actions
-------------------------------------------------
\b
init Create a new Ursula node configuration.
view View the Ursula node's configuration.
run Run an "Ursula" node.
save-metadata Manually write node metadata to disk without running
forget Forget all known nodes.
destroy Delete Ursula node configuration.
stake Manage stakes for this node.
confirm-activity Manually confirm-activity for the current period.
collect-reward Withdraw staking reward.
"""
#
# Boring Setup Stuff
#
if not quiet:
log = Logger("ursula.cli")
if click_config.debug and quiet:
raise click.BadOptionUsage(
option_name="quiet",
message="--debug and --quiet cannot be used at the same time.",
)
if not click_config.json_ipc and not click_config.quiet:
click.secho(URSULA_BANNER.format(checksum_address or ""))
#
# Pre-Launch Warnings
#
if not click_config.quiet:
if dev:
click.secho("WARNING: Running in Development mode", fg="yellow")
if force:
click.secho("WARNING: Force is enabled", fg="yellow")
#
# Unauthenticated Configurations & Un-configured Ursula Control
#
if action == "init":
"""Create a brand-new persistent Ursula"""
if not network:
raise click.BadArgumentUsage(
"--network is required to initialize a new configuration."
)
if dev:
click_config.emitter(
message="WARNING: Using temporary storage area", color="yellow"
)
if not config_root: # Flag
config_root = click_config.config_file # Envvar
if not rest_host:
rest_host = click.prompt(
"Enter Ursula's public-facing IPv4 address"
) # TODO: Remove this step
ursula_config = UrsulaConfiguration.generate(
password=click_config.get_password(confirm=True),
config_root=config_root,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
domains={network} if network else None,
federated_only=federated_only,
checksum_public_address=checksum_address,
no_registry=federated_only or no_registry,
registry_filepath=registry_filepath,
provider_uri=provider_uri,
poa=poa,
)
painting.paint_new_installation_help(
new_configuration=ursula_config,
config_root=config_root,
config_file=config_file,
federated_only=federated_only,
)
return
#
# Configured Ursulas
#
# Development Configuration
if dev:
ursula_config = UrsulaConfiguration(
dev_mode=True,
domains={TEMPORARY_DOMAIN},
poa=poa,
registry_filepath=registry_filepath,
provider_uri=provider_uri,
checksum_public_address=checksum_address,
federated_only=federated_only,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
)
# Authenticated Configurations
else:
# Domains -> bytes | or default
domains = [bytes(network, encoding="utf-8")] if network else None
# Load Ursula from Configuration File
ursula_config = UrsulaConfiguration.from_configuration_file(
filepath=config_file,
domains=domains,
registry_filepath=registry_filepath,
provider_uri=provider_uri,
rest_host=rest_host,
rest_port=rest_port,
db_filepath=db_filepath,
poa=poa,
)
click_config.unlock_keyring(character_configuration=ursula_config)
#
# Connect to Blockchain (Non-Federated)
#
if not ursula_config.federated_only:
click_config.connect_to_blockchain(
character_configuration=ursula_config,
recompile_contracts=recompile_solidity,
)
click_config.ursula_config = (
ursula_config # Pass Ursula's config onto staking sub-command
)
#
# Launch Warnings
#
if ursula_config.federated_only:
click_config.emitter(
message="WARNING: Running in Federated mode", color="yellow"
)
# Seed - Step 1
teacher_uris = [teacher_uri] if teacher_uri else list()
teacher_nodes = actions.load_seednodes(
teacher_uris=teacher_uris,
min_stake=min_stake,
federated_only=federated_only,
network_middleware=click_config.middleware,
)
# Produce - Step 2
URSULA = ursula_config(known_nodes=teacher_nodes, lonely=lonely)
#
# Action Switch
#
if action == "run":
"""Seed, Produce, Run!"""
# GO!
try:
click_config.emitter(
message="Starting Ursula on {}".format(URSULA.rest_interface),
color="green",
bold=True,
)
# Ursula Deploy Warnings
click_config.emitter(
message="Connecting to {}".format(
",".join(str(d, encoding="utf-8") for d in ursula_config.domains)
),
color="green",
bold=True,
)
if not URSULA.federated_only and URSULA.stakes:
click_config.emitter(
message=f"Staking {str(URSULA.total_staked)} ~ Keep Ursula Online!",
color="blue",
bold=True,
)
if not click_config.debug:
stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))
if dry_run:
return # <-- ABORT -X (Last Chance)
# Run - Step 3
node_deployer = URSULA.get_deployer()
node_deployer.addServices()
node_deployer.catalogServers(node_deployer.hendrix)
node_deployer.run() # <--- Blocking Call (Reactor)
# Handle Crash
except Exception as e:
ursula_config.log.critical(str(e))
click_config.emitter(
message="{} {}".format(e.__class__.__name__, str(e)),
color="red",
bold=True,
)
raise # Crash :-(
# Graceful Exit / Crash
finally:
click_config.emitter(message="Stopping Ursula", color="green")
ursula_config.cleanup()
click_config.emitter(message="Ursula Stopped", color="red")
return
elif action == "save-metadata":
"""Manually save a node self-metadata file"""
metadata_path = ursula.write_node_metadata(node=URSULA)
return click_config.emitter(
message="Successfully saved node metadata to {}.".format(metadata_path),
color="green",
)
elif action == "view":
"""Paint an existing configuration to the console"""
response = UrsulaConfiguration._read_configuration_file(
filepath=config_file or ursula_config.config_file_location
)
return click_config.emitter(response=response)
elif action == "forget":
actions.forget(configuration=ursula_config)
return
elif action == "destroy":
"""Delete all configuration files from the disk"""
if dev:
message = "'nucypher ursula destroy' cannot be used in --dev mode"
raise click.BadOptionUsage(option_name="--dev", message=message)
destroyed_filepath = destroy_system_configuration(
config_class=UrsulaConfiguration,
config_file=config_file,
network=network,
config_root=ursula_config.config_file_location,
force=force,
)
return click_config.emitter(
message=f"Destroyed {destroyed_filepath}", color="green"
)
elif action == "stake":
# List Only
if list_:
if not URSULA.stakes:
click.echo(
f"There are no existing stakes for {URSULA.checksum_public_address}"
)
painting.paint_stakes(stakes=URSULA.stakes)
return
# Divide Only
if divide:
"""Divide an existing stake by specifying the new target value and end period"""
# Validate
if len(URSULA.stakes) == 0:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
# Selection
if index is None:
painting.paint_stakes(stakes=URSULA.stakes)
index = click.prompt(
"Select a stake to divide",
type=click.IntRange(min=0, max=len(URSULA.stakes) - 1),
)
# Lookup the stake
current_stake = URSULA.stakes[index]
# Value
if not value:
value = click.prompt(
f"Enter target value (must be less than {str(current_stake.value)})",
type=STAKE_VALUE,
)
value = NU(value, "NU")
# Duration
if not duration:
extension = click.prompt(
"Enter number of periods to extend", type=STAKE_EXTENSION
)
else:
extension = duration
if not force:
painting.paint_staged_stake_division(
ursula=URSULA,
original_index=index,
original_stake=current_stake,
target_value=value,
extension=extension,
)
click.confirm("Is this correct?", abort=True)
txhash_bytes = URSULA.divide_stake(
stake_index=index, target_value=value, additional_periods=extension
)
if not quiet:
click.secho("Successfully divided stake", fg="green")
click.secho(f"Transaction Hash ........... {txhash_bytes.hex()}")
# Show the resulting stake list
painting.paint_stakes(stakes=URSULA.stakes)
return
# Confirm new stake init
if not force:
click.confirm("Stage a new stake?", abort=True)
# Validate balance
balance = URSULA.token_balance
if balance == 0:
click.secho(f"{ursula.checksum_public_address} has 0 NU.")
raise click.Abort
if not quiet:
click.echo(f"Current balance: {balance}")
# Gather stake value
if not value:
value = click.prompt(
f"Enter stake value",
type=STAKE_VALUE,
default=NU(MIN_ALLOWED_LOCKED, "NuNit"),
)
else:
value = NU(int(value), "NU")
# Duration
if not quiet:
message = "Minimum duration: {} | Maximum Duration: {}".format(
MIN_LOCKED_PERIODS, MAX_MINTING_PERIODS
)
click.echo(message)
if not duration:
duration = click.prompt(
"Enter stake duration in periods (1 Period = 24 Hours)",
type=STAKE_DURATION,
)
start_period = URSULA.miner_agent.get_current_period()
end_period = start_period + duration
# Review
if not force:
painting.paint_staged_stake(
ursula=URSULA,
stake_value=value,
duration=duration,
start_period=start_period,
end_period=end_period,
)
if not dev:
actions.confirm_staged_stake(
ursula=URSULA, value=value, duration=duration
)
# Last chance to bail
if not force:
click.confirm("Publish staged stake to the blockchain?", abort=True)
staking_transactions = URSULA.initialize_stake(
amount=int(value), lock_periods=duration
)
painting.paint_staking_confirmation(
ursula=URSULA, transactions=staking_transactions
)
return
elif action == "confirm-activity":
if not URSULA.stakes:
click.secho(
"There are no active stakes for {}".format(
URSULA.checksum_public_address
)
)
return
URSULA.miner_agent.confirm_activity(node_address=URSULA.checksum_public_address)
return
elif action == "collect-reward":
"""Withdraw staking reward to the specified wallet address"""
if not force:
click.confirm(
f"Send {URSULA.calculate_reward()} to {URSULA.checksum_public_address}?"
)
URSULA.collect_policy_reward(
collector_address=withdraw_address or checksum_address
)
URSULA.collect_staking_reward()
else:
raise click.BadArgumentUsage("No such argument {}".format(action))
|
https://github.com/nucypher/nucypher/issues/907
|
2019-04-04T17:31:25-0700 [learning-loop#warn] Unhandled error during node learning: Traceback (most recent call last):
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/base.py", line 1272, in run
self.mainLoop()
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop
self.runUntilCurrent()
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/base.py", line 902, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/task.py", line 239, in __call__
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
--- <exception caught here> ---
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/home/kieran/Git/nucypher/nucypher/network/nodes.py", line 534, in keep_learning_about_nodes
self.learn_from_teacher_node(eager=False)
File "/home/kieran/Git/nucypher/nucypher/network/nodes.py", line 746, in learn_from_teacher_node
if not self.learning_domains.intersection(node.serving_domains):
builtins.AttributeError: 'list' object has no attribute 'intersection'
|
builtins.AttributeError
|
def from_configuration_file(
cls, filepath: str = None, **overrides
) -> "NodeConfiguration":
"""Initialize a NodeConfiguration from a JSON file."""
from nucypher.config.storages import NodeStorage
node_storage_subclasses = {
storage._name: storage for storage in NodeStorage.__subclasses__()
}
if filepath is None:
filepath = cls.DEFAULT_CONFIG_FILE_LOCATION
# Read from disk
payload = cls._read_configuration_file(filepath=filepath)
# Sanity check
try:
checksum_address = payload["checksum_public_address"]
except KeyError:
raise cls.ConfigurationError(
f"No checksum address specified in configuration file {filepath}"
)
else:
if not eth_utils.is_checksum_address(checksum_address):
raise cls.ConfigurationError(
f"Address: '{checksum_address}', specified in {filepath} is not a valid checksum address."
)
# Initialize NodeStorage subclass from file (sub-configuration)
storage_payload = payload["node_storage"]
storage_type = storage_payload[NodeStorage._TYPE_LABEL]
storage_class = node_storage_subclasses[storage_type]
node_storage = storage_class.from_payload(
payload=storage_payload,
federated_only=payload["federated_only"],
serializer=cls.NODE_SERIALIZER,
deserializer=cls.NODE_DESERIALIZER,
)
# Deserialize domains to UTF-8 bytestrings
domains = set(domain.encode() for domain in payload["domains"])
payload.update(dict(node_storage=node_storage, domains=domains))
# Filter out Nones from overrides to detect, well, overrides
overrides = {k: v for k, v in overrides.items() if v is not None}
# Instantiate from merged params
node_configuration = cls(**{**payload, **overrides})
return node_configuration
|
def from_configuration_file(
cls, filepath: str = None, **overrides
) -> "NodeConfiguration":
"""Initialize a NodeConfiguration from a JSON file."""
from nucypher.config.storages import NodeStorage
node_storage_subclasses = {
storage._name: storage for storage in NodeStorage.__subclasses__()
}
if filepath is None:
filepath = cls.DEFAULT_CONFIG_FILE_LOCATION
# Read from disk
payload = cls._read_configuration_file(filepath=filepath)
# Sanity check
try:
checksum_address = payload["checksum_public_address"]
except KeyError:
raise cls.ConfigurationError(
f"No checksum address specified in configuration file {filepath}"
)
else:
if not eth_utils.is_checksum_address(checksum_address):
raise cls.ConfigurationError(
f"Address: '{checksum_address}', specified in {filepath} is not a valid checksum address."
)
# Initialize NodeStorage subclass from file (sub-configuration)
storage_payload = payload["node_storage"]
storage_type = storage_payload[NodeStorage._TYPE_LABEL]
storage_class = node_storage_subclasses[storage_type]
node_storage = storage_class.from_payload(
payload=storage_payload,
federated_only=payload["federated_only"],
serializer=cls.NODE_SERIALIZER,
deserializer=cls.NODE_DESERIALIZER,
)
# Deserialize domains to UTF-8 bytestrings
domains = list(domain.encode() for domain in payload["domains"])
payload.update(dict(node_storage=node_storage, domains=domains))
# Filter out Nones from overrides to detect, well, overrides
overrides = {k: v for k, v in overrides.items() if v is not None}
# Instantiate from merged params
node_configuration = cls(**{**payload, **overrides})
return node_configuration
|
https://github.com/nucypher/nucypher/issues/907
|
2019-04-04T17:31:25-0700 [learning-loop#warn] Unhandled error during node learning: Traceback (most recent call last):
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/base.py", line 1272, in run
self.mainLoop()
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop
self.runUntilCurrent()
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/base.py", line 902, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/task.py", line 239, in __call__
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
--- <exception caught here> ---
File "/home/kieran/.local/share/virtualenvs/nucypher-9CH2v_st/lib/python3.7/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/home/kieran/Git/nucypher/nucypher/network/nodes.py", line 534, in keep_learning_about_nodes
self.learn_from_teacher_node(eager=False)
File "/home/kieran/Git/nucypher/nucypher/network/nodes.py", line 746, in learn_from_teacher_node
if not self.learning_domains.intersection(node.serving_domains):
builtins.AttributeError: 'list' object has no attribute 'intersection'
|
builtins.AttributeError
|
def from_bytes(
cls,
ursula_as_bytes: bytes,
version: int = INCLUDED_IN_BYTESTRING,
federated_only: bool = False,
) -> "Ursula":
if version is INCLUDED_IN_BYTESTRING:
version, payload = cls.version_splitter(ursula_as_bytes, return_remainder=True)
else:
payload = ursula_as_bytes
# Check version and raise IsFromTheFuture if this node is... you guessed it...
if version > cls.LEARNER_VERSION:
# TODO: Some auto-updater logic?
try:
canonical_address, _ = BytestringSplitter(PUBLIC_ADDRESS_LENGTH)(
payload, return_remainder=True
)
checksum_address = to_checksum_address(canonical_address)
nickname, _ = nickname_from_seed(checksum_address)
display_name = "⇀{}↽ ({})".format(nickname, checksum_address)
message = cls.unknown_version_message.format(
display_name, version, cls.LEARNER_VERSION
)
except BytestringSplittingError:
message = cls.really_unknown_version_message.format(
version, cls.LEARNER_VERSION
)
raise cls.IsFromTheFuture(message)
# Version stuff checked out. Moving on.
node_info = cls.internal_splitter(payload)
powers_and_material = {
SigningPower: node_info.pop("verifying_key"),
DecryptingPower: node_info.pop("encrypting_key"),
}
interface_info = node_info.pop("rest_interface")
node_info["rest_host"] = interface_info.host
node_info["rest_port"] = interface_info.port
node_info["timestamp"] = maya.MayaDT(node_info.pop("timestamp"))
node_info["checksum_public_address"] = to_checksum_address(
node_info.pop("public_address")
)
domains_vbytes = VariableLengthBytestring.dispense(node_info["domains"])
node_info["domains"] = set(constant_or_bytes(d) for d in domains_vbytes)
ursula = cls.from_public_keys(
powers_and_material, federated_only=federated_only, **node_info
)
return ursula
|
def from_bytes(
cls,
ursula_as_bytes: bytes,
version: int = INCLUDED_IN_BYTESTRING,
federated_only: bool = False,
) -> "Ursula":
if version is INCLUDED_IN_BYTESTRING:
version, payload = cls.version_splitter(ursula_as_bytes, return_remainder=True)
else:
payload = ursula_as_bytes
# Check version and raise IsFromTheFuture if this node is... you guessed it...
if version > cls.LEARNER_VERSION:
# TODO: Some auto-updater logic?
try:
canonical_address, _ = BytestringSplitter(PUBLIC_ADDRESS_LENGTH)(
payload, return_remainder=True
)
checksum_address = to_checksum_address(canonical_address)
nickname, _ = nickname_from_seed(checksum_address)
display_name = "⇀{}↽ ({})".format(nickname, checksum_address)
message = cls.unknown_version_message.format(
display_name, version, cls.LEARNER_VERSION
)
except BytestringSplittingError:
message = cls.really_unknown_version_message.format(
version, cls.LEARNER_VERSION
)
raise cls.IsFromTheFuture(message)
# Version stuff checked out. Moving on.
node_info = cls.internal_splitter(payload)
powers_and_material = {
SigningPower: node_info.pop("verifying_key"),
DecryptingPower: node_info.pop("encrypting_key"),
}
interface_info = node_info.pop("rest_interface")
node_info["rest_host"] = interface_info.host
node_info["rest_port"] = interface_info.port
node_info["timestamp"] = maya.MayaDT(node_info.pop("timestamp"))
node_info["checksum_public_address"] = to_checksum_address(
node_info.pop("public_address")
)
domains_vbytes = VariableLengthBytestring.dispense(node_info["domains"])
node_info["domains"] = [constant_or_bytes(d) for d in domains_vbytes]
ursula = cls.from_public_keys(
powers_and_material, federated_only=federated_only, **node_info
)
return ursula
|
https://github.com/nucypher/nucypher/issues/911
|
- generated xml file: /home/kieran/Git/nucypher/tests/characters/reports/pytest-results.xml -
Coverage statistics reporting failed
Traceback (most recent call last):
File "/snap/pycharm-professional/127/helpers/pycharm/teamcity/pytest_plugin.py", line 354, in pytest_terminal_summary
self._report_coverage()
File "/snap/pycharm-professional/127/helpers/pycharm/teamcity/pytest_plugin.py", line 421, in _report_coverage
self.teamcity,
File "/snap/pycharm-professional/127/helpers/pycharm/teamcity/pytest_plugin.py", line 368, in __init__
self.branches = coverage.data.has_arcs()
AttributeError: 'Coverage' object has no attribute 'data'
|
AttributeError
|
def convert_token_dict(token_dict):
"""Convert the dictionary format input token to the CoNLL-U format output token. This is the reverse function of
`convert_conll_token`.
Input: dictionary format token, which is a dictionaries for the token.
Output: CoNLL-U format token, which is a list for the token.
"""
token_conll = ["_" for i in range(FIELD_NUM)]
for key in token_dict:
if key == ID:
token_conll[FIELD_TO_IDX[key]] = (
"-".join([str(x) for x in token_dict[key]])
if isinstance(token_dict[key], tuple)
else str(token_dict[key])
)
elif key in FIELD_TO_IDX:
token_conll[FIELD_TO_IDX[key]] = str(token_dict[key])
# when a word (not mwt token) without head is found, we insert dummy head as required by the UD eval script
if "-" not in token_conll[FIELD_TO_IDX[ID]] and HEAD not in token_dict:
token_conll[FIELD_TO_IDX[HEAD]] = str(
int(
token_dict[ID] if isinstance(token_dict[ID], int) else token_dict[ID][0]
)
- 1
) # evaluation script requires head: int
return token_conll
|
def convert_token_dict(token_dict):
"""Convert the dictionary format input token to the CoNLL-U format output token. This is the reverse function of
`convert_conll_token`.
Input: dictionary format token, which is a dictionaries for the token.
Output: CoNLL-U format token, which is a list for the token.
"""
token_conll = ["_" for i in range(FIELD_NUM)]
for key in token_dict:
if key == ID:
token_conll[FIELD_TO_IDX[key]] = (
"-".join([str(x) for x in token_dict[key]])
if isinstance(token_dict[key], tuple)
else str(token_dict[key])
)
elif key in FIELD_TO_IDX:
token_conll[FIELD_TO_IDX[key]] = str(token_dict[key])
# when a word (not mwt token) without head is found, we insert dummy head as required by the UD eval script
if "-" not in token_conll[FIELD_TO_IDX[ID]] and HEAD not in token_dict:
token_conll[FIELD_TO_IDX[HEAD]] = str(
(token_dict[ID] if isinstance(token_dict[ID], int) else token_dict[ID][0])
- 1
) # evaluation script requires head: int
return token_conll
|
https://github.com/stanfordnlp/stanza/issues/483
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/m0re/projects/phd/stanza/stanza/utils/conll.py", line 112, in convert_dict
token_conll = CoNLL.convert_token_dict(token_dict)
File "/home/m0re/projects/phd/stanza/stanza/utils/conll.py", line 132, in convert_token_dict
token_conll[FIELD_TO_IDX[HEAD]] = str((token_dict[ID] if isinstance(token_dict[ID], int) else token_dict[ID][0]) - 1) # evaluation script requires head: int
TypeError: unsupported operand type(s) for -: 'str' and 'int'
|
TypeError
|
def display_results(
self,
endpoints,
fields,
sort_by=0,
max_width=0,
unique=False,
nonzero=False,
output_format="table",
ipv4_only=True,
ipv6_only=False,
ipv4_and_ipv6=False,
):
matrix = []
fields_lookup = {
"id": (GetData._get_name, 0),
"mac": (GetData._get_mac, 1),
"mac address": (GetData._get_mac, 1),
"switch": (GetData._get_switch, 2),
"port": (GetData._get_port, 3),
"vlan": (GetData._get_vlan, 4),
"ipv4": (GetData._get_ipv4, 5),
"ipv4 subnet": (GetData._get_ipv4_subnet, 6),
"ipv6": (GetData._get_ipv6, 7),
"ipv6 subnet": (GetData._get_ipv6_subnet, 8),
"ethernet vendor": (GetData._get_ether_vendor, 9),
"ignored": (GetData._get_ignored, 10),
"state": (GetData._get_state, 11),
"next state": (GetData._get_next_state, 12),
"first seen": (GetData._get_first_seen, 13),
"last seen": (GetData._get_last_seen, 14),
"previous states": (GetData._get_prev_states, 15),
"ipv4 os": (GetData._get_ipv4_os, 16),
"ipv4 os\n(p0f)": (GetData._get_ipv4_os, 16),
"ipv6 os": (GetData._get_ipv6_os, 17),
"ipv6 os\n(p0f)": (GetData._get_ipv6_os, 17),
"previous ipv4 oses": (GetData._get_prev_ipv4_oses, 18),
"previous ipv4 oses\n(p0f)": (GetData._get_prev_ipv4_oses, 18),
"previous ipv6 oses": (GetData._get_prev_ipv6_oses, 19),
"previous ipv6 oses\n(p0f)": (GetData._get_prev_ipv6_oses, 19),
"role": (GetData._get_role, 20),
"role\n(poseidonml)": (GetData._get_role, 20),
"role confidence": (GetData._get_role_confidence, 21),
"role confidence\n(poseidonml)": (GetData._get_role_confidence, 21),
"previous roles": (GetData._get_prev_roles, 22),
"previous roles\n(poseidonml)": (GetData._get_prev_roles, 22),
"previous role confidences": (GetData._get_prev_role_confidences, 23),
"previous role confidences\n(poseidonml)": (
GetData._get_prev_role_confidences,
23,
),
"behavior": (GetData._get_behavior, 24),
"behavior\n(poseidonml)": (GetData._get_behavior, 24),
"previous behaviors": (GetData._get_prev_behaviors, 25),
"previous behaviors\n(poseidonml)": (GetData._get_prev_behaviors, 25),
"ipv4 rdns": (GetData._get_ipv4_rdns, 26),
"ipv6 rdns": (GetData._get_ipv6_rdns, 27),
"sdn controller type": (GetData._get_controller_type, 28),
"sdn controller uri": (GetData._get_controller, 29),
}
for index, field in enumerate(fields):
if ipv4_only:
if "6" in field:
fields[index] = field.replace("6", "4")
if ipv6_only:
if "4" in field:
fields[index] = field.replace("4", "6")
if ipv4_and_ipv6:
for index, field in enumerate(fields):
if "4" in field:
if field.replace("4", "6") not in fields:
fields.insert(index + 1, field.replace("4", "6"))
if "6" in field:
if field.replace("6", "4") not in fields:
fields.insert(index + 1, field.replace("6", "4"))
if nonzero or unique:
records = []
for endpoint in endpoints:
record = []
for field in fields:
record.append(fields_lookup[field.lower()][0](endpoint))
# remove rows that are all zero or 'NO DATA'
if not nonzero or not all(
item == "0" or item == "NO DATA" for item in record
):
records.append(record)
# remove columns that are all zero or 'NO DATA'
del_columns = []
for i in range(len(fields)):
marked = False
if nonzero and all(
item[i] == "0" or item[i] == "NO DATA" for item in records
):
del_columns.append(i)
marked = True
if unique and not marked:
column_vals = [item[i] for item in records]
if len(set(column_vals)) == 1:
del_columns.append(i)
del_columns.reverse()
for val in del_columns:
for row in records:
del row[val]
del fields[val]
if len(fields) > 0:
matrix = records
if not nonzero and not unique:
for endpoint in endpoints:
record = []
for field in fields:
record.append(fields_lookup[field.lower()][0](endpoint))
matrix.append(record)
if len(matrix) > 0:
matrix = sorted(matrix, key=lambda endpoint: endpoint[sort_by])
# swap out field names for header
fields_header = []
for field in fields:
fields_header.append(self.all_fields[fields_lookup[field.lower()][1]])
# set the header
matrix.insert(0, fields_header)
table = Texttable(max_width=max_width)
# make all the column types be text
table.set_cols_dtype(["t"] * len(fields))
table.add_rows(matrix)
print(table.draw())
else:
print("No results found for that query.")
return
|
def display_results(
self,
endpoints,
fields,
sort_by=0,
max_width=0,
unique=False,
nonzero=False,
output_format="table",
ipv4_only=True,
ipv6_only=False,
ipv4_and_ipv6=False,
):
matrix = []
fields_lookup = {
"id": (GetData._get_name, 0),
"mac": (GetData._get_mac, 1),
"mac address": (GetData._get_mac, 1),
"switch": (GetData._get_switch, 2),
"port": (GetData._get_port, 3),
"vlan": (GetData._get_vlan, 4),
"ipv4": (GetData._get_ipv4, 5),
"ipv4 subnet": (GetData._get_ipv4_subnet, 6),
"ipv6": (GetData._get_ipv6, 7),
"ipv6 subnet": (GetData._get_ipv6_subnet, 8),
"ethernet vendor": (GetData._get_ether_vendor, 9),
"ignored": (GetData._get_ignored, 10),
"state": (GetData._get_state, 11),
"next state": (GetData._get_next_state, 12),
"first seen": (GetData._get_first_seen, 13),
"last seen": (GetData._get_last_seen, 14),
"previous states": (GetData._get_prev_states, 15),
"ipv4 os": (GetData._get_ipv4_os, 16),
"ipv4 os\n(p0f)": (GetData._get_ipv4_os, 16),
"ipv6 os": (GetData._get_ipv6_os, 17),
"ipv6 os\n(p0f)": (GetData._get_ipv6_os, 17),
"previous ipv4 oses": (GetData._get_prev_ipv4_oses, 18),
"previous ipv4 oses\n(p0f)": (GetData._get_prev_ipv4_oses, 18),
"previous ipv6 oses": (GetData._get_prev_ipv6_oses, 19),
"previous ipv6 oses\n(p0f)": (GetData._get_prev_ipv6_oses, 19),
"role": (GetData._get_role, 20),
"role\n(poseidonml)": (GetData._get_role, 20),
"role confidence": (GetData._get_role_confidence, 21),
"role confidence\n(poseidonml)": (GetData._get_role_confidence, 21),
"previous roles": (GetData._get_prev_roles, 22),
"previous roles\n(poseidonml)": (GetData._get_prev_roles, 22),
"previous role confidences": (GetData._get_prev_role_confidences, 23),
"previous role confidences\n(poseidonml)": (
GetData._get_prev_role_confidences,
23,
),
"behavior": (GetData._get_behavior, 24),
"behavior\n(poseidonml)": (GetData._get_behavior, 24),
"previous behaviors": (GetData._get_prev_behaviors, 25),
"previous behaviors\n(poseidonml)": (GetData._get_prev_behaviors, 25),
"ipv4 rdns": (GetData._get_ipv4_rdns, 26),
"ipv6 rdns": (GetData._get_ipv6_rdns, 27),
"sdn controller type": (GetData._get_controller_type, 28),
"sdn controller uri": (GetData._get_controller, 29),
}
for index, field in enumerate(fields):
if ipv4_only:
if "6" in field:
fields[index] = field.replace("6", "4")
if ipv6_only:
if "4" in field:
fields[index] = field.replace("4", "6")
if ipv4_and_ipv6:
for index, field in enumerate(fields):
if "4" in field:
if field.replace("4", "6") not in fields:
fields.insert(index + 1, field.replace("4", "6"))
if "6" in field:
if field.replace("6", "4") not in fields:
fields.insert(index + 1, field.replace("6", "4"))
if nonzero or unique:
records = []
for endpoint in endpoints:
record = []
for field in fields:
record.append(fields_lookup[field.lower()][0](endpoint))
# remove rows that are all zero or 'NO DATA'
if not nonzero or not all(
item == "0" or item == "NO DATA" for item in record
):
records.append(record)
# remove columns that are all zero or 'NO DATA'
del_columns = []
for i in range(len(fields)):
marked = False
if nonzero and all(
item[i] == "0" or item[i] == "NO DATA" for item in records
):
del_columns.append(i)
marked = True
if unique and not marked:
column_vals = [item[i] for item in records]
if len(set(column_vals)) == 1:
del_columns.append(i)
del_columns.reverse()
for val in del_columns:
for row in records:
del row[val]
del fields[val]
matrix = records
if not nonzero and not unique:
for endpoint in endpoints:
record = []
for field in fields:
record.append(fields_lookup[field.lower()][0](endpoint))
matrix.append(record)
if len(matrix) > 0:
matrix = sorted(matrix, key=lambda endpoint: endpoint[sort_by])
# swap out field names for header
fields_header = []
for field in fields:
fields_header.append(self.all_fields[fields_lookup[field.lower()][1]])
# set the header
matrix.insert(0, fields_header)
table = Texttable(max_width=max_width)
# make all the column types be text
table.set_cols_dtype(["t"] * len(fields))
table.add_rows(matrix)
print(table.draw())
else:
print("No results found for that query.")
return
|
https://github.com/IQTLabs/poseidon/issues/1152
|
poseidon$ show all -4and6 --fields=[next state]
[ERROR] Exception in show_all: '<' not supported between instances of 'NoneType' and 'str'
Traceback (most recent call last):
File "/poseidon/poseidon/helpers/exception_decor.py", line 19, in wrapper
return function(*args, **kwargs)
File "/poseidon/poseidon/cli/cli.py", line 500, in show_all
arg), fields, sort_by=sort_by, max_width=max_width, unique=unique, nonzero=nonzero, output_format=output_format, ipv4_only=ipv4_only, ipv6_only=ipv6_only, ipv4_and_ipv6=ipv4_and_ipv6)
File "/poseidon/poseidon/cli/cli.py", line 439, in display_results
matrix = sorted(matrix, key=lambda endpoint: endpoint[sort_by])
TypeError: '<' not supported between instances of 'NoneType' and 'str'
|
TypeError
|
def _get_next_state(endpoint):
return str(endpoint.p_next_state)
|
def _get_next_state(endpoint):
return endpoint.p_next_state
|
https://github.com/IQTLabs/poseidon/issues/1152
|
poseidon$ show all -4and6 --fields=[next state]
[ERROR] Exception in show_all: '<' not supported between instances of 'NoneType' and 'str'
Traceback (most recent call last):
File "/poseidon/poseidon/helpers/exception_decor.py", line 19, in wrapper
return function(*args, **kwargs)
File "/poseidon/poseidon/cli/cli.py", line 500, in show_all
arg), fields, sort_by=sort_by, max_width=max_width, unique=unique, nonzero=nonzero, output_format=output_format, ipv4_only=ipv4_only, ipv6_only=ipv6_only, ipv4_and_ipv6=ipv4_and_ipv6)
File "/poseidon/poseidon/cli/cli.py", line 439, in display_results
matrix = sorted(matrix, key=lambda endpoint: endpoint[sort_by])
TypeError: '<' not supported between instances of 'NoneType' and 'str'
|
TypeError
|
def do_task(self, arg):
"""Perform task to things on the network"""
# TODO
flags, arg = self.parser.get_flags(arg)
if arg:
action = arg.split()[0]
func_calls = {
"clear": self.task_clear,
"collect": self.task_collect,
"ignore": self.task_ignore,
"remove": self.task_remove,
"set": self.task_set,
}
if action in func_calls:
if len(arg.split()) > 1:
func_calls[action](arg, flags)
else:
print(action.upper() + " <ID|IP|MAC>")
else:
print("Unknown command, try 'help task'")
else:
self.help_task()
|
def do_task(self, arg):
"""Perform task to things on the network"""
# TODO
flags, arg = self.parser.get_flags(arg)
action = arg.split()[0]
func_calls = {
"clear": self.task_clear,
"collect": self.task_collect,
"ignore": self.task_ignore,
"remove": self.task_remove,
"set": self.task_set,
}
if action in func_calls:
if len(arg.split()) > 1:
func_calls[action](arg, flags)
else:
print(action.upper() + " <ID|IP|MAC>")
else:
print("Unknown command, try 'help task'")
|
https://github.com/IQTLabs/poseidon/issues/1075
|
[ERROR] Exception in do_show: list index out of range
Traceback (most recent call last):
File "/poseidon/poseidon/helpers/exception_decor.py", line 19, in wrapper
return function(*args, **kwargs)
File "/poseidon/poseidon/cli/cli.py", line 687, in do_show
action = arg.split()[0]
IndexError: list index out of range
|
IndexError
|
def do_show(self, arg):
"""Show things on the network based on filters"""
flags, arg = self.parser.get_flags(arg)
if arg:
action = arg.split()[0]
func_calls = {
"all": self.show_all,
"authors": self.show_authors,
"behavior": self.show_behavior,
"history": self.show_history,
"os": self.show_os,
"role": self.show_role,
"state": self.show_state,
"what": self.show_what,
"where": self.show_where,
}
if action in func_calls:
if action in ["all", "authors"]:
func_calls[action](arg, flags)
elif action in ["history", "what", "where"]:
if len(arg.split()) > 1:
func_calls[action](arg, flags)
else:
print(action.upper() + " <ID|IP|MAC>")
else:
valid = False
for show_comm in self.show_completions:
if arg.startswith(show_comm):
valid = True
func_calls[action](arg, flags)
if not valid:
print("Unknown command, try 'help show'")
else:
print("Unknown command, try 'help show'")
else:
self.help_show()
|
def do_show(self, arg):
"""Show things on the network based on filters"""
flags, arg = self.parser.get_flags(arg)
action = arg.split()[0]
func_calls = {
"all": self.show_all,
"authors": self.show_authors,
"behavior": self.show_behavior,
"history": self.show_history,
"os": self.show_os,
"role": self.show_role,
"state": self.show_state,
"what": self.show_what,
"where": self.show_where,
}
if action in func_calls:
if action in ["all", "authors"]:
func_calls[action](arg, flags)
elif action in ["history", "what", "where"]:
if len(arg.split()) > 1:
func_calls[action](arg, flags)
else:
print(action.upper() + " <ID|IP|MAC>")
else:
valid = False
for show_comm in self.show_completions:
if arg.startswith(show_comm):
valid = True
func_calls[action](arg, flags)
if not valid:
print("Unknown command, try 'help show'")
else:
print("Unknown command, try 'help show'")
|
https://github.com/IQTLabs/poseidon/issues/1075
|
[ERROR] Exception in do_show: list index out of range
Traceback (most recent call last):
File "/poseidon/poseidon/helpers/exception_decor.py", line 19, in wrapper
return function(*args, **kwargs)
File "/poseidon/poseidon/cli/cli.py", line 687, in do_show
action = arg.split()[0]
IndexError: list index out of range
|
IndexError
|
def display_results(
self, endpoints, fields, sort_by=0, max_width=0, unique=False, nonzero=False
):
matrix = []
fields_lookup = {
"id": (PoseidonShell._get_name, 0),
"mac address": (PoseidonShell._get_mac, 1),
"switch": (PoseidonShell._get_switch, 2),
"port": (PoseidonShell._get_port, 3),
"vlan": (PoseidonShell._get_vlan, 4),
"ipv4": (PoseidonShell._get_ipv4, 5),
"ipv6": (PoseidonShell._get_ipv6, 6),
"ignored": (PoseidonShell._get_ignored, 7),
"state": (PoseidonShell._get_state, 8),
"next state": (PoseidonShell._get_next_state, 9),
"first seen": (PoseidonShell._get_first_seen, 10),
"last seen": (PoseidonShell._get_last_seen, 11),
"previous states": (PoseidonShell._get_prev_states, 12),
"ipv4 os": (PoseidonShell._get_ipv4_os, 13),
"ipv6 os": (PoseidonShell._get_ipv6_os, 14),
"previous ipv4 oses": (PoseidonShell._get_prev_ipv4_oses, 15),
"previous ipv6 oses": (PoseidonShell._get_prev_ipv6_oses, 16),
"role": (PoseidonShell._get_role, 17),
"role (confidence)": (PoseidonShell._get_role, 17),
"previous roles": (PoseidonShell._get_prev_roles, 18),
"behavior": (PoseidonShell._get_behavior, 19),
"previous behaviors": (PoseidonShell._get_prev_behaviors, 20),
}
# TODO #971 check if unqiue flag and limit columns (fields)
# TODO #963 check if nonzero flag and limit rows/columns
for endpoint in endpoints:
record = []
for field in fields:
record.append(fields_lookup[field.lower()][0](endpoint))
matrix.append(record)
if len(matrix) > 0:
matrix = sorted(matrix, key=lambda endpoint: endpoint[sort_by])
# swap out field names for header
fields_header = []
for field in fields:
fields_header.append(self.all_fields[fields_lookup[field.lower()][1]])
# set the header
matrix.insert(0, fields_header)
table = Texttable(max_width=max_width)
# make all the column types be text
table.set_cols_dtype(["t"] * len(fields))
table.add_rows(matrix)
print(table.draw())
else:
print("No results found for that query.")
return
|
def display_results(
self, endpoints, fields, sort_by=0, max_width=0, unique=False, nonzero=False
):
matrix = []
fields_lookup = {
"id": (PoseidonShell._get_name, 0),
"mac address": (PoseidonShell._get_mac, 1),
"switch": (PoseidonShell._get_switch, 2),
"port": (PoseidonShell._get_port, 3),
"vlan": (PoseidonShell._get_vlan, 4),
"ipv4": (PoseidonShell._get_ipv4, 5),
"ipv6": (PoseidonShell._get_ipv6, 6),
"ignored": (PoseidonShell._get_ignored, 7),
"state": (PoseidonShell._get_state, 8),
"next state": (PoseidonShell._get_next_state, 9),
"first seen": (PoseidonShell._get_first_seen, 10),
"last seen": (PoseidonShell._get_last_seen, 11),
"previous states": (PoseidonShell._get_prev_states, 12),
"ipv4 os": (PoseidonShell._get_ipv4_os, 13),
"ipv6 os": (PoseidonShell._get_ipv6_os, 14),
"previous ipv4 oses": (PoseidonShell._get_prev_ipv4_oses, 15),
"previous ipv6 oses": (PoseidonShell._get_prev_ipv6_oses, 16),
"device type": (PoseidonShell._get_device_type, 17),
"device type (confidence)": (PoseidonShell._get_device_type, 17),
"previous device types": (PoseidonShell._get_prev_device_types, 18),
"device behavior": (PoseidonShell._get_device_behavior, 19),
"previous device behaviors": (PoseidonShell._get_prev_device_behaviors, 20),
}
# TODO #971 check if unqiue flag and limit columns (fields)
# TODO #963 check if nonzero flag and limit rows/columns
for endpoint in endpoints:
record = []
for field in fields:
record.append(fields_lookup[field.lower()][0](endpoint))
matrix.append(record)
if len(matrix) > 0:
matrix = sorted(matrix, key=lambda endpoint: endpoint[sort_by])
# swap out field names for header
fields_header = []
for field in fields:
fields_header.append(self.all_fields[fields_lookup[field.lower()][1]])
# set the header
matrix.insert(0, fields_header)
table = Texttable(max_width=max_width)
# make all the column types be text
table.set_cols_dtype(["t"] * len(fields))
table.add_rows(matrix)
print(table.draw())
else:
print("No results found for that query.")
return
|
https://github.com/IQTLabs/poseidon/issues/1040
|
poseidon$ record
Traceback (most recent call last):
File "/poseidon/poseidon/cli/cli.py", line 547, in <module>
PoseidonShell().cmdloop()
File "/usr/lib/python3.6/cmd.py", line 138, in cmdloop
stop = self.onecmd(line)
File "/usr/lib/python3.6/cmd.py", line 217, in onecmd
return func(arg)
File "/poseidon/poseidon/cli/cli.py", line 526, in do_record
self.file = open(arg, 'w')
FileNotFoundError: [Errno 2] No such file or directory: ''
|
FileNotFoundError
|
def do_quit(self, arg):
"Stop the shell and exit: QUIT"
print("Thank you for using Poseidon")
self.close()
return True
|
def do_quit(self, arg):
"Stop recording and exit: QUIT"
print("Thank you for using Poseidon")
self.close()
return True
|
https://github.com/IQTLabs/poseidon/issues/1040
|
poseidon$ record
Traceback (most recent call last):
File "/poseidon/poseidon/cli/cli.py", line 547, in <module>
PoseidonShell().cmdloop()
File "/usr/lib/python3.6/cmd.py", line 138, in cmdloop
stop = self.onecmd(line)
File "/usr/lib/python3.6/cmd.py", line 217, in onecmd
return func(arg)
File "/poseidon/poseidon/cli/cli.py", line 526, in do_record
self.file = open(arg, 'w')
FileNotFoundError: [Errno 2] No such file or directory: ''
|
FileNotFoundError
|
def log(self, log_file):
mac_table = {}
if not log_file:
# default to FAUCET default
log_file = "/var/log/ryu/faucet/faucet.log"
# NOTE very fragile, prone to errors
try:
with open(log_file, "r") as f:
for line in f:
if "L2 learned" in line:
learned_mac = line.split()
data = {
"ip-address": learned_mac[16][0:-1],
"ip-state": "L2 learned",
"mac": learned_mac[10],
"segment": learned_mac[7][1:-1],
"port": learned_mac[22],
"tenant": learned_mac[24] + learned_mac[25],
}
if learned_mac[10] in mac_table:
dup = False
for d in mac_table[learned_mac[10]]:
if data == d:
dup = True
if dup:
mac_table[learned_mac[10]].remove(data)
mac_table[learned_mac[10]].insert(0, data)
else:
mac_table[learned_mac[10]] = [data]
except Exception as e:
self.logger.debug("error {0}".format(str(e)))
return mac_table
|
def log(self, log_file):
mac_table = {}
if not log_file:
# default to FAUCET default
log_file = "/var/log/ryu/faucet/faucet.log"
# NOTE very fragile, prone to errors
try:
with open(log_file, "r") as f:
for line in f:
if "L2 learned" in line:
learned_mac = line.split()
data = {
"ip-address": learned_mac[16][0:-1],
"ip-state": "L2 learned",
"mac": learned_mac[10],
"segment": learned_mac[7][1:-1],
"port": learned_mac[19],
"tenant": learned_mac[21] + learned_mac[22],
}
if learned_mac[10] in mac_table:
dup = False
for d in mac_table[learned_mac[10]]:
if data == d:
dup = True
if dup:
mac_table[learned_mac[10]].remove(data)
mac_table[learned_mac[10]].insert(0, data)
else:
mac_table[learned_mac[10]] = [data]
except Exception as e:
self.logger.debug("error {0}".format(str(e)))
return mac_table
|
https://github.com/IQTLabs/poseidon/issues/505
|
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: Traceback (most recent call last):
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: File "/tmp/poseidonWork/poseidon/poseidonMonitor/poseidonMonitor.py", line 538, in <module>
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: main(skip_rabbit=False)
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: File "/tmp/poseidonWork/poseidon/poseidonMonitor/poseidonMonitor.py", line 528, in main
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: pmain.process()
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: File "/tmp/poseidonWork/poseidon/poseidonMonitor/poseidonMonitor.py", line 419, in process
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: self.uss.mirror_endpoint(endpoint_hash)
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: File "/tmp/poseidonWork/poseidon/poseidonMonitor/NorthBoundControllerAbstraction/UpdateSwitchState.py", line 150, in mirror_endpoint
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: self.sdnc.mirror_ip(my_ip)
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: File "/tmp/poseidonWork/poseidon/poseidonMonitor/NorthBoundControllerAbstraction/proxy/faucet/faucet.py", line 178, in mirror_ip
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: 'mirror', int(port), switch):
2018-03-05T11:55:18+00:00 172.17.0.1 plugin[788]: ValueError: invalid literal for int() with base 10: '192.168.1.255)'
|
ValueError
|
def iterbatches(
self, batch_size=None, epochs=1, deterministic=False, pad_batches=False
):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: `(X,
y, w, ids)`.
Parameters
----------
batch_size: int, optional
Number of elements in each batch
epochs: int, optional
Number of epochs to walk over dataset
deterministic: bool, optional
If True, follow deterministic order.
pad_batches: bool, optional
If True, pad each batch to `batch_size`.
Returns
-------
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`
"""
raise NotImplementedError()
|
def iterbatches(self, batch_size=None, epoch=0, deterministic=False, pad_batches=False):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: `(X,
y, w, ids)`.
Parameters
----------
batch_size: int, optional
Number of elements in each batch
epoch: int, optional
Number of epochs to walk over dataset
deterministic: bool, optional
If True, follow deterministic order.
pad_batches: bool, optional
If True, pad each batch to `batch_size`.
Returns
-------
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`
"""
raise NotImplementedError()
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def make_tf_dataset(
self, batch_size=100, epochs=1, deterministic=False, pad_batches=False
):
"""Create a tf.data.Dataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y,
w) for one batch.
Parameters
----------
batch_size: int
the number of samples to include in each batch
epochs: int
the number of times to iterate over the Dataset
deterministic: bool
if True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
pad_batches: bool
if True, batches are padded as necessary to make the size of
each batch exactly equal batch_size.
Returns
-------
tf.Dataset that iterates over the same data.
"""
# Retrieve the first sample so we can determine the dtypes.
import tensorflow as tf
X, y, w, ids = next(self.itersamples())
dtypes = (tf.as_dtype(X.dtype), tf.as_dtype(y.dtype), tf.as_dtype(w.dtype))
shapes = (
tf.TensorShape([None] + list(X.shape)),
tf.TensorShape([None] + list(y.shape)),
tf.TensorShape([None] + list(w.shape)),
)
# Create a Tensorflow Dataset.
def gen_data():
for X, y, w, ids in self.iterbatches(
batch_size, epochs, deterministic, pad_batches
):
yield (X, y, w)
return tf.data.Dataset.from_generator(gen_data, dtypes, shapes)
|
def make_tf_dataset(
self, batch_size=100, epochs=1, deterministic=False, pad_batches=False
):
"""Create a tf.data.Dataset that iterates over the data in this Dataset.
Each value returned by the Dataset's iterator is a tuple of (X, y,
w) for one batch.
Parameters
----------
batch_size: int
the number of samples to include in each batch
epochs: int
the number of times to iterate over the Dataset
deterministic: bool
if True, the data is produced in order. If False, a different
random permutation of the data is used for each epoch.
pad_batches: bool
if True, batches are padded as necessary to make the size of
each batch exactly equal batch_size.
Returns
-------
tf.Dataset that iterates over the same data.
"""
# Retrieve the first sample so we can determine the dtypes.
import tensorflow as tf
X, y, w, ids = next(self.itersamples())
dtypes = (tf.as_dtype(X.dtype), tf.as_dtype(y.dtype), tf.as_dtype(w.dtype))
shapes = (
tf.TensorShape([None] + list(X.shape)),
tf.TensorShape([None] + list(y.shape)),
tf.TensorShape([None] + list(w.shape)),
)
# Create a Tensorflow Dataset.
def gen_data():
for epoch in range(epochs):
for X, y, w, ids in self.iterbatches(
batch_size, epoch, deterministic, pad_batches
):
yield (X, y, w)
return tf.data.Dataset.from_generator(gen_data, dtypes, shapes)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def gen_data():
for X, y, w, ids in self.iterbatches(
batch_size, epochs, deterministic, pad_batches
):
yield (X, y, w)
|
def gen_data():
for epoch in range(epochs):
for X, y, w, ids in self.iterbatches(
batch_size, epoch, deterministic, pad_batches
):
yield (X, y, w)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def iterbatches(
self, batch_size=None, epochs=1, deterministic=False, pad_batches=False
):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y,
w, ids).
Parameters
----------
batch_size: int, optional
Number of elements in each batch
epochs: int, optional
Number of epochs to walk over dataset
deterministic: bool, optional
If True, follow deterministic order.
pad_batches: bool, optional
If True, pad each batch to `batch_size`.
Returns
-------
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`
"""
def iterate(dataset, batch_size, epochs, deterministic, pad_batches):
n_samples = dataset._X.shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_batch = dataset._X[perm_indices]
y_batch = dataset._y[perm_indices]
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch
)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
|
def iterbatches(self, batch_size=None, epoch=0, deterministic=False, pad_batches=False):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y,
w, ids).
Parameters
----------
batch_size: int, optional
Number of elements in each batch
epoch: int, optional
Number of epochs to walk over dataset
deterministic: bool, optional
If True, follow deterministic order.
pad_batches: bool, optional
If True, pad each batch to `batch_size`.
Returns
-------
Generator which yields tuples of four numpy arrays `(X, y, w, ids)`
"""
def iterate(dataset, batch_size, deterministic, pad_batches):
n_samples = dataset._X.shape[0]
if not deterministic:
sample_perm = np.random.permutation(n_samples)
else:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_batch = dataset._X[perm_indices]
y_batch = dataset._y[perm_indices]
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch
)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, deterministic, pad_batches)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def iterate(dataset, batch_size, epochs, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = dc.data.ImageLoader.load_img(
[dataset._X[i] for i in perm_indices]
)
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = dc.data.ImageLoader.load_img(
[dataset._y[i] for i in perm_indices]
)
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch
)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
|
def iterate(dataset, batch_size, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if not deterministic:
sample_perm = np.random.permutation(n_samples)
else:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = dc.data.ImageLoader.load_img(
[dataset._X[i] for i in perm_indices]
)
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = dc.data.ImageLoader.load_img(
[dataset._y[i] for i in perm_indices]
)
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch
)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def iterbatches(
self, batch_size=None, epochs=1, deterministic=False, pad_batches=False
):
"""Get an object that iterates over minibatches from the dataset.
It is guaranteed that the number of batches returned is
`math.ceil(len(dataset)/batch_size)`. Each minibatch is returned as
a tuple of four numpy arrays: `(X, y, w, ids)`.
Parameters:
-----------
batch_size: int
Number of elements in a batch. If None, then it yields batches
with size equal to the size of each individual shard.
epoch: int
Number of epochs to walk over dataset
deterministic: bool
Whether or not we should should shuffle each shard before
generating the batches. Note that this is only local in the
sense that it does not ever mix between different shards.
pad_batches: bool
Whether or not we should pad the last batch, globally, such that
it has exactly batch_size elements.
"""
shard_indices = list(range(self.get_number_shards()))
return self._iterbatches_from_shards(
shard_indices, batch_size, epochs, deterministic, pad_batches
)
|
def iterbatches(self, batch_size=None, epoch=0, deterministic=False, pad_batches=False):
"""Get an object that iterates over minibatches from the dataset.
It is guaranteed that the number of batches returned is
`math.ceil(len(dataset)/batch_size)`. Each minibatch is returned as
a tuple of four numpy arrays: `(X, y, w, ids)`.
Parameters:
-----------
batch_size: int
Number of elements in a batch. If None, then it yields batches
with size equal to the size of each individual shard.
epoch: int
Not used
deterministic: bool
Whether or not we should should shuffle each shard before
generating the batches. Note that this is only local in the
sense that it does not ever mix between different shards.
pad_batches: bool
Whether or not we should pad the last batch, globally, such that
it has exactly batch_size elements.
"""
shard_indices = list(range(self.get_number_shards()))
return self._iterbatches_from_shards(
shard_indices, batch_size, deterministic, pad_batches
)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def _iterbatches_from_shards(
self,
shard_indices,
batch_size=None,
epochs=1,
deterministic=False,
pad_batches=False,
):
"""Get an object that iterates over batches from a restricted set of shards."""
def iterate(dataset, batch_size, epochs):
num_shards = len(shard_indices)
if deterministic:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously as un-thread safe this looks,
# we're actually protected by the GIL.
pool = Pool(1) # mp.dummy aliases ThreadPool to Pool
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)
for epoch in range(epochs):
if not deterministic:
shard_perm = np.random.permutation(num_shards)
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[0]],)
)
cur_global_batch = 0
cur_shard = 0
carry = None
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[cur_shard + 1]],)
)
elif epoch == epochs - 1:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b, ids_b) = pad_batch(
shard_batch_size, X_b, y_b, w_b, ids_b
)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
return iterate(self, batch_size, epochs)
|
def _iterbatches_from_shards(
self, shard_indices, batch_size=None, deterministic=False, pad_batches=False
):
"""Get an object that iterates over batches from a restricted set of shards."""
def iterate(dataset, batch_size):
num_shards = len(shard_indices)
if not deterministic:
shard_perm = np.random.permutation(num_shards)
else:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously as un-thread safe this looks,
# we're actually protected by the GIL.
pool = Pool(1) # mp.dummy aliases ThreadPool to Pool
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[0]],)
)
total_yield = 0
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)
cur_global_batch = 0
cur_shard = 0
carry = None
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[cur_shard + 1]],)
)
else:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b, ids_b) = pad_batch(
shard_batch_size, X_b, y_b, w_b, ids_b
)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
return iterate(self, batch_size)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def iterate(dataset, batch_size, epochs):
num_shards = len(shard_indices)
if deterministic:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously as un-thread safe this looks,
# we're actually protected by the GIL.
pool = Pool(1) # mp.dummy aliases ThreadPool to Pool
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)
for epoch in range(epochs):
if not deterministic:
shard_perm = np.random.permutation(num_shards)
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[0]],)
)
cur_global_batch = 0
cur_shard = 0
carry = None
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[cur_shard + 1]],)
)
elif epoch == epochs - 1:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b, ids_b) = pad_batch(
shard_batch_size, X_b, y_b, w_b, ids_b
)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
|
def iterate(dataset, batch_size):
num_shards = len(shard_indices)
if not deterministic:
shard_perm = np.random.permutation(num_shards)
else:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously as un-thread safe this looks,
# we're actually protected by the GIL.
pool = Pool(1) # mp.dummy aliases ThreadPool to Pool
next_shard = pool.apply_async(dataset.get_shard, (shard_indices[shard_perm[0]],))
total_yield = 0
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)
cur_global_batch = 0
cur_shard = 0
carry = None
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(
dataset.get_shard, (shard_indices[shard_perm[cur_shard + 1]],)
)
else:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b, ids_b) = pad_batch(
shard_batch_size, X_b, y_b, w_b, ids_b
)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def iterbatches(
self, batch_size=None, epochs=1, deterministic=False, pad_batches=False
):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y,
w, ids).
"""
def iterate(dataset, batch_size, epochs, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if deterministic:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
for epoch in range(epochs):
if not deterministic:
sample_perm = np.random.permutation(n_samples)
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = dc.data.ImageLoader.load_img(
[dataset._X[i] for i in perm_indices]
)
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = dc.data.ImageLoader.load_img(
[dataset._y[i] for i in perm_indices]
)
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch
)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, epochs, deterministic, pad_batches)
|
def iterbatches(self, batch_size=None, epoch=0, deterministic=False, pad_batches=False):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y,
w, ids).
"""
def iterate(dataset, batch_size, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if not deterministic:
sample_perm = np.random.permutation(n_samples)
else:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = dc.data.ImageLoader.load_img(
[dataset._X[i] for i in perm_indices]
)
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = dc.data.ImageLoader.load_img(
[dataset._y[i] for i in perm_indices]
)
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch
)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, deterministic, pad_batches)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def iterbatches(self, **kwargs):
"""Loop through all internal datasets in the same order.
Parameters
----------
batch_size: int
Number of samples from each dataset to return
epochs: int
Number of times to loop through the datasets
pad_batches: boolean
Should all batches==batch_size
Returns
-------
Generator which yields a dictionary {key: dataset.X[batch]}
"""
key_order = [x for x in self.datasets.keys()]
if "epochs" in kwargs:
epochs = kwargs["epochs"]
del kwargs["epochs"]
else:
epochs = 1
kwargs["deterministic"] = True
for epoch in range(epochs):
iterators = [self.datasets[x].iterbatches(**kwargs) for x in key_order]
for tup in zip(*iterators):
m_d = {key_order[i]: tup[i][0] for i in range(len(key_order))}
yield m_d
|
def iterbatches(self, **kwargs):
"""Loop through all internal datasets in the same order.
Parameters
----------
batch_size: int
Number of samples from each dataset to return
epoch: int
Number of times to loop through the datasets
pad_batches: boolean
Should all batches==batch_size
Returns
-------
Generator which yields a dictionary {key: dataset.X[batch]}
"""
key_order = [x for x in self.datasets.keys()]
if "epochs" in kwargs:
epochs = kwargs["epochs"]
del kwargs["epochs"]
else:
epochs = 1
kwargs["deterministic"] = True
for epoch in range(epochs):
iterators = [self.datasets[x].iterbatches(**kwargs) for x in key_order]
for tup in zip(*iterators):
m_d = {key_order[i]: tup[i][0] for i in range(len(key_order))}
yield m_d
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def fit_on_batch(
self,
X,
y,
w,
variables=None,
loss=None,
callbacks=[],
checkpoint=True,
max_checkpoints_to_keep=5,
):
"""Perform a single step of training.
Parameters
----------
X: ndarray
the inputs for the batch
y: ndarray
the labels for the batch
w: ndarray
the weights for the batch
variables: list of tf.Variable
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
checkpoint: bool
if true, save a checkpoint after performing the training step
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
"""
self._ensure_built()
dataset = NumpyDataset(X, y, w)
return self.fit(
dataset,
nb_epoch=1,
max_checkpoints_to_keep=max_checkpoints_to_keep,
checkpoint_interval=self._global_step.numpy() + 2 if checkpoint else 0,
variables=variables,
loss=loss,
callbacks=callbacks,
)
|
def fit_on_batch(self, X, y, w, variables=None, loss=None, callbacks=[]):
"""Perform a single step of training.
Parameters
----------
X: ndarray
the inputs for the batch
y: ndarray
the labels for the batch
w: ndarray
the weights for the batch
variables: list of tf.Variable
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
"""
if not self.built:
self.build()
dataset = NumpyDataset(X, y, w)
return self.fit(
dataset, nb_epoch=1, variables=variables, loss=loss, callbacks=callbacks
)
|
https://github.com/deepchem/deepchem/issues/1909
|
Traceback (most recent call last):
File "test_fit_on_batch.py", line 23, in <module>
model.fit_on_batch(X, y, w)
File "/Users/bharath/Code/deepchem/deepchem/models/keras_model.py", line 428, in fit_on_batch
if not self.built:
AttributeError: 'GraphConvModel' object has no attribute 'built'
|
AttributeError
|
def ensure_not_chunked(arrow_array):
if isinstance(arrow_array, pa.ChunkedArray):
if len(arrow_array.chunks) == 1:
return arrow_array.chunks[0]
table = pa.Table.from_arrays([arrow_array], ["single"])
table_concat = table.combine_chunks()
column = table_concat.columns[0]
assert column.num_chunks == 1
arrow_array = column.chunk(0)
return arrow_array
|
def ensure_not_chunked(arrow_array):
if isinstance(arrow_array, pa.ChunkedArray):
if len(arrow_array.chunks) == 0:
return arrow_array.chunks[0]
table = pa.Table.from_arrays([arrow_array], ["single"])
table_concat = table.combine_chunks()
column = table_concat.columns[0]
assert column.num_chunks == 1
arrow_array = column.chunk(0)
return arrow_array
|
https://github.com/vaexio/vaex/issues/1101
|
AttributeError Traceback (most recent call last)
<ipython-input-16-cc5013060237> in <module>
----> 1 df.column_a.mean()
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/expression.py in mean(self, binby, limits, shape, selection, delay, progress)
550 del kwargs['self']
551 kwargs['expression'] = self.expression
--> 552 return self.ds.mean(**kwargs)
553
554 def std(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in mean(self, expression, binby, limits, shape, selection, delay, progress, edges, array_type)
851 :return: {return_stat_scalar}
852 """
--> 853 return self._compute_agg('mean', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
854 logger.debug("mean of %r, with binby=%r, limits=%r, shape=%r, selection=%r, delay=%r", expression, binby, limits, shape, selection, delay)
855 expression = _ensure_strings_from_expressions(expression)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _compute_agg(self, name, expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions, array_type)
683 # give errors when evaluated with NaN's present.
684 # TODO: GET RID OF THIS
--> 685 len(self) # fill caches and masks
686 # pass
687 grid = self._create_grid(binby, limits, shape, selection=selection, delay=True)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in __len__(self)
3763 else:
3764 if self._cached_filtered_length is None:
-> 3765 self. _cached_filtered_length = int(self.count())
3766 return self._cached_filtered_length
3767
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in count(self, expression, binby, limits, shape, selection, delay, edges, progress, array_type)
768 :return: {return_stat_scalar}
769 """
--> 770 return self._compute_agg('count', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
771
772 @delayed
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _compute_agg(self, name, expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions, array_type)
742 stats = [compute(expression, grid, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions]
743 var = finish(grid, *stats)
--> 744 return self._delay(delay, var)
745
746 @docsubst
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _delay(self, delay, task, progressbar)
1506 return task
1507 else:
-> 1508 self.execute()
1509 return task.get()
1510
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in execute(self)
343 '''Execute all delayed jobs.'''
344 from .asyncio import just_run
--> 345 just_run(self.execute_async())
346
347 async def execute_async(self):
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/asyncio.py in just_run(coro)
33 nest_asyncio.apply()
34 check_patch_tornado()
---> 35 return loop.run_until_complete(coro)
36 finally:
37 if not had_loop: # remove loop if we did not have one
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/nest_asyncio.py in run_until_complete(self, future)
96 raise RuntimeError(
97 'Event loop stopped before Future completed.')
---> 98 return f.result()
99 finally:
100 self._thread_id = old_thread_id
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/tasks.py in _step(***failed resolving arguments***)
180 result = coro.send(None)
181 else:
--> 182 result = coro.throw(exc)
183 except StopIteration as exc:
184 if self._must_cancel:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in execute_async(self)
348 '''Async version of execute'''
349 # no need to clear _task_aggs anymore, since they will be removed for the executors' task list
--> 350 await self.executor.execute_async()
351
352 @property
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/execution.py in execute_async(self)
182 all([all(task.signal_progress.emit(p)) for task in tasks]) and
183 all([not task.cancelled for task in tasks]),
--> 184 cancel=lambda: self._cancel(run), unpack=True, run=run):
185 pass # just eat all element
186 logger.debug("executing took %r seconds" % (time.time() - t0))
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/multithreading.py in map_async(self, callable, iterator, count, on_error, progress, cancel, unpack, **kwargs_extra)
88 total = 0
89 for i, value in buffer(enumerate(iterator), self._max_workers + 3):
---> 90 value = await value
91 if value != None:
92 total += value
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/futures.py in __iter__(self)
325 if not self.done():
326 self._asyncio_future_blocking = True
--> 327 yield self # This tells Task to wait for completion.
328 assert self.done(), "yield from wasn't used with future"
329 return self.result() # May raise too.
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/tasks.py in _wakeup(self, future)
248 def _wakeup(self, future):
249 try:
--> 250 future.result()
251 except Exception as exc:
252 # This may also be a cancellation.
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/futures.py in result(self)
241 self._log_traceback = False
242 if self._exception is not None:
--> 243 raise self._exception
244 return self._result
245
~/.conda-envs/datalakeutils/lib/python3.6/concurrent/futures/thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/multithreading.py in <lambda>(value)
84 else:
85 loop = asyncio.get_event_loop()
---> 86 iterator = (loop.run_in_executor(self, lambda value=value: wrapped(value)) for value in iterator)
87
88 total = 0
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/multithreading.py in wrapped(*args, **kwargs)
76 # print("SLEEP", self._debug_sleep)
77 time.sleep(self._debug_sleep)
---> 78 return callable(self.local.index, *args, **kwargs, **kwargs_extra)
79 # convert to list so we can count
80 time_last = time.time() - 100
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/execution.py in process_part(self, thread_index, i1, i2, chunks, run)
232 assert len(chunk) == N, f'Oops, got a chunk ({name}) of length {len(chunk)} while it is expected to be of length {N} (at {i1}-{i2}'
233 if run.pre_filter:
--> 234 filter_mask = df.evaluate_selection_mask(None, i1=i1, i2=i2, cache=True)
235 chunks = {k:vaex.array_types.filter(v, filter_mask) for k, v, in chunks.items()}
236 else:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in evaluate_selection_mask(self, name, i1, i2, selection, cache, filtered, pre_filtered)
2578 if name in [None, False] and self.filtered and filtered:
2579 scope_global = scopes._BlockScopeSelection(self, i1, i2, None, cache=cache)
-> 2580 mask_global = scope_global.evaluate(FILTER_SELECTION_NAME)
2581 return vaex.utils.unmask_selection_mask(mask_global)
2582 elif self.filtered and filtered and name != FILTER_SELECTION_NAME:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/scopes.py in evaluate(self, expression)
181 try:
182 expression = _ensure_string_from_expression(expression)
--> 183 result = eval(expression, expression_namespace, self)
184 except:
185 import traceback as tb
<string> in <module>
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/scopes.py in __getitem__(self, variable)
217 if variable in self.df.variables:
218 return self.df.variables[variable]
--> 219 mask_values = selection.evaluate(self.df, variable, self.i1, self.i2, self.filter_mask)
220
221 # get a view on a subset of the mask
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/selections.py in evaluate(self, df, name, i1, i2, filter_mask)
128 else:
129 previous_mask = None
--> 130 result = df._evaluate_selection_mask(self.boolean_expression, i1, i2, filter_mask=filter_mask)
131 if isinstance(result, bool):
132 N = i2 - i1
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _evaluate_selection_mask(self, name, i1, i2, selection, cache, filter_mask)
2564 i2 = i2 or len(self)
2565 scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache, filter_mask=filter_mask)
-> 2566 mask = scope.evaluate(name)
2567 # TODO: can we do without arrow->numpy conversion?
2568 mask = vaex.array_types.to_numpy(mask)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/scopes.py in evaluate(self, expression)
181 try:
182 expression = _ensure_string_from_expression(expression)
--> 183 result = eval(expression, expression_namespace, self)
184 except:
185 import traceback as tb
<string> in <module>
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/arrow/numpy_dispatch.py in operator(a, b)
85 result_data = op['op'](a_data, b_data)
86 if isinstance(a, NumpyDispatch):
---> 87 result_data = a.add_missing(result_data)
88 if isinstance(b, NumpyDispatch):
89 result_data = b.add_missing(result_data)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/arrow/numpy_dispatch.py in add_missing(self, ar)
39 if isinstance(self._array, vaex.array_types.supported_arrow_array_types):
40 ar = vaex.array_types.to_arrow(ar)
---> 41 ar = combine_missing(ar, self._array)
42 # else: both numpy, handled by numpy
43 else:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/arrow/numpy_dispatch.py in combine_missing(a, b)
11 # not optimal
12 nulls = pc.invert(pc.or_(a.is_null(), b.is_null()))
---> 13 assert nulls.offset == 0
14 nulls_buffer = nulls.buffers()[1]
15 # this is not the case: no reason why it should be (TODO: open arrow issue)
AttributeError: 'pyarrow.lib.ChunkedArray' object has no attribute 'offset'
|
AttributeError
|
def combine_missing(a, b):
# return a copy of a with missing values of a and b combined
if a.null_count > 0 or b.null_count > 0:
a, b = vaex.arrow.convert.align(a, b)
if isinstance(a, pa.ChunkedArray):
# divide and conquer
assert isinstance(b, pa.ChunkedArray)
assert len(a.chunks) == len(b.chunks)
return pa.chunked_array(
[combine_missing(ca, cb) for ca, cb in zip(a.chunks, b.chunks)]
)
if a.offset != 0:
a = vaex.arrow.convert.trim_buffers_ipc(a)
if b.offset != 0:
b = vaex.arrow.convert.trim_buffers_ipc(b)
assert a.offset == 0
assert b.offset == 0
# not optimal
nulls = pc.invert(pc.or_(a.is_null(), b.is_null()))
assert nulls.offset == 0
nulls_buffer = nulls.buffers()[1]
# this is not the case: no reason why it should be (TODO: open arrow issue)
# assert nulls.buffers()[0] is None
buffers = a.buffers()
return pa.Array.from_buffers(a.type, len(a), [nulls_buffer] + buffers[1:])
else:
return a
|
def combine_missing(a, b):
assert a.offset == 0
if a.null_count > 0 or b.null_count > 0:
# not optimal
nulls = pc.invert(pc.or_(a.is_null(), b.is_null()))
assert nulls.offset == 0
nulls_buffer = nulls.buffers()[1]
# this is not the case: no reason why it should be (TODO: open arrow issue)
# assert nulls.buffers()[0] is None
else:
nulls_buffer = None
buffers = a.buffers()
return pa.Array.from_buffers(a.type, len(a), [nulls_buffer] + buffers[1:])
|
https://github.com/vaexio/vaex/issues/1101
|
AttributeError Traceback (most recent call last)
<ipython-input-16-cc5013060237> in <module>
----> 1 df.column_a.mean()
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/expression.py in mean(self, binby, limits, shape, selection, delay, progress)
550 del kwargs['self']
551 kwargs['expression'] = self.expression
--> 552 return self.ds.mean(**kwargs)
553
554 def std(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in mean(self, expression, binby, limits, shape, selection, delay, progress, edges, array_type)
851 :return: {return_stat_scalar}
852 """
--> 853 return self._compute_agg('mean', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
854 logger.debug("mean of %r, with binby=%r, limits=%r, shape=%r, selection=%r, delay=%r", expression, binby, limits, shape, selection, delay)
855 expression = _ensure_strings_from_expressions(expression)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _compute_agg(self, name, expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions, array_type)
683 # give errors when evaluated with NaN's present.
684 # TODO: GET RID OF THIS
--> 685 len(self) # fill caches and masks
686 # pass
687 grid = self._create_grid(binby, limits, shape, selection=selection, delay=True)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in __len__(self)
3763 else:
3764 if self._cached_filtered_length is None:
-> 3765 self. _cached_filtered_length = int(self.count())
3766 return self._cached_filtered_length
3767
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in count(self, expression, binby, limits, shape, selection, delay, edges, progress, array_type)
768 :return: {return_stat_scalar}
769 """
--> 770 return self._compute_agg('count', expression, binby, limits, shape, selection, delay, edges, progress, array_type=array_type)
771
772 @delayed
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _compute_agg(self, name, expression, binby, limits, shape, selection, delay, edges, progress, extra_expressions, array_type)
742 stats = [compute(expression, grid, selection=selection, edges=edges, progressbar=progressbar) for expression in expressions]
743 var = finish(grid, *stats)
--> 744 return self._delay(delay, var)
745
746 @docsubst
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _delay(self, delay, task, progressbar)
1506 return task
1507 else:
-> 1508 self.execute()
1509 return task.get()
1510
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in execute(self)
343 '''Execute all delayed jobs.'''
344 from .asyncio import just_run
--> 345 just_run(self.execute_async())
346
347 async def execute_async(self):
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/asyncio.py in just_run(coro)
33 nest_asyncio.apply()
34 check_patch_tornado()
---> 35 return loop.run_until_complete(coro)
36 finally:
37 if not had_loop: # remove loop if we did not have one
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/nest_asyncio.py in run_until_complete(self, future)
96 raise RuntimeError(
97 'Event loop stopped before Future completed.')
---> 98 return f.result()
99 finally:
100 self._thread_id = old_thread_id
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/tasks.py in _step(***failed resolving arguments***)
180 result = coro.send(None)
181 else:
--> 182 result = coro.throw(exc)
183 except StopIteration as exc:
184 if self._must_cancel:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in execute_async(self)
348 '''Async version of execute'''
349 # no need to clear _task_aggs anymore, since they will be removed for the executors' task list
--> 350 await self.executor.execute_async()
351
352 @property
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/execution.py in execute_async(self)
182 all([all(task.signal_progress.emit(p)) for task in tasks]) and
183 all([not task.cancelled for task in tasks]),
--> 184 cancel=lambda: self._cancel(run), unpack=True, run=run):
185 pass # just eat all element
186 logger.debug("executing took %r seconds" % (time.time() - t0))
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/multithreading.py in map_async(self, callable, iterator, count, on_error, progress, cancel, unpack, **kwargs_extra)
88 total = 0
89 for i, value in buffer(enumerate(iterator), self._max_workers + 3):
---> 90 value = await value
91 if value != None:
92 total += value
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/futures.py in __iter__(self)
325 if not self.done():
326 self._asyncio_future_blocking = True
--> 327 yield self # This tells Task to wait for completion.
328 assert self.done(), "yield from wasn't used with future"
329 return self.result() # May raise too.
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/tasks.py in _wakeup(self, future)
248 def _wakeup(self, future):
249 try:
--> 250 future.result()
251 except Exception as exc:
252 # This may also be a cancellation.
~/.conda-envs/datalakeutils/lib/python3.6/asyncio/futures.py in result(self)
241 self._log_traceback = False
242 if self._exception is not None:
--> 243 raise self._exception
244 return self._result
245
~/.conda-envs/datalakeutils/lib/python3.6/concurrent/futures/thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/multithreading.py in <lambda>(value)
84 else:
85 loop = asyncio.get_event_loop()
---> 86 iterator = (loop.run_in_executor(self, lambda value=value: wrapped(value)) for value in iterator)
87
88 total = 0
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/multithreading.py in wrapped(*args, **kwargs)
76 # print("SLEEP", self._debug_sleep)
77 time.sleep(self._debug_sleep)
---> 78 return callable(self.local.index, *args, **kwargs, **kwargs_extra)
79 # convert to list so we can count
80 time_last = time.time() - 100
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/execution.py in process_part(self, thread_index, i1, i2, chunks, run)
232 assert len(chunk) == N, f'Oops, got a chunk ({name}) of length {len(chunk)} while it is expected to be of length {N} (at {i1}-{i2}'
233 if run.pre_filter:
--> 234 filter_mask = df.evaluate_selection_mask(None, i1=i1, i2=i2, cache=True)
235 chunks = {k:vaex.array_types.filter(v, filter_mask) for k, v, in chunks.items()}
236 else:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in evaluate_selection_mask(self, name, i1, i2, selection, cache, filtered, pre_filtered)
2578 if name in [None, False] and self.filtered and filtered:
2579 scope_global = scopes._BlockScopeSelection(self, i1, i2, None, cache=cache)
-> 2580 mask_global = scope_global.evaluate(FILTER_SELECTION_NAME)
2581 return vaex.utils.unmask_selection_mask(mask_global)
2582 elif self.filtered and filtered and name != FILTER_SELECTION_NAME:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/scopes.py in evaluate(self, expression)
181 try:
182 expression = _ensure_string_from_expression(expression)
--> 183 result = eval(expression, expression_namespace, self)
184 except:
185 import traceback as tb
<string> in <module>
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/scopes.py in __getitem__(self, variable)
217 if variable in self.df.variables:
218 return self.df.variables[variable]
--> 219 mask_values = selection.evaluate(self.df, variable, self.i1, self.i2, self.filter_mask)
220
221 # get a view on a subset of the mask
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/selections.py in evaluate(self, df, name, i1, i2, filter_mask)
128 else:
129 previous_mask = None
--> 130 result = df._evaluate_selection_mask(self.boolean_expression, i1, i2, filter_mask=filter_mask)
131 if isinstance(result, bool):
132 N = i2 - i1
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/dataframe.py in _evaluate_selection_mask(self, name, i1, i2, selection, cache, filter_mask)
2564 i2 = i2 or len(self)
2565 scope = scopes._BlockScopeSelection(self, i1, i2, selection, cache=cache, filter_mask=filter_mask)
-> 2566 mask = scope.evaluate(name)
2567 # TODO: can we do without arrow->numpy conversion?
2568 mask = vaex.array_types.to_numpy(mask)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/scopes.py in evaluate(self, expression)
181 try:
182 expression = _ensure_string_from_expression(expression)
--> 183 result = eval(expression, expression_namespace, self)
184 except:
185 import traceback as tb
<string> in <module>
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/arrow/numpy_dispatch.py in operator(a, b)
85 result_data = op['op'](a_data, b_data)
86 if isinstance(a, NumpyDispatch):
---> 87 result_data = a.add_missing(result_data)
88 if isinstance(b, NumpyDispatch):
89 result_data = b.add_missing(result_data)
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/arrow/numpy_dispatch.py in add_missing(self, ar)
39 if isinstance(self._array, vaex.array_types.supported_arrow_array_types):
40 ar = vaex.array_types.to_arrow(ar)
---> 41 ar = combine_missing(ar, self._array)
42 # else: both numpy, handled by numpy
43 else:
~/.conda-envs/datalakeutils/lib/python3.6/site-packages/vaex/arrow/numpy_dispatch.py in combine_missing(a, b)
11 # not optimal
12 nulls = pc.invert(pc.or_(a.is_null(), b.is_null()))
---> 13 assert nulls.offset == 0
14 nulls_buffer = nulls.buffers()[1]
15 # this is not the case: no reason why it should be (TODO: open arrow issue)
AttributeError: 'pyarrow.lib.ChunkedArray' object has no attribute 'offset'
|
AttributeError
|
def open(path, fs_options):
source = vaex.file.open_for_arrow(
path=path, mode="rb", fs_options=fs_options, mmap=True
)
file_signature = source.read(6)
is_arrow_file = file_signature == b"ARROW1"
source.seek(0)
if is_arrow_file:
reader = pa.ipc.open_file(source)
# for some reason this reader is not iterable
batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]
else:
reader = pa.ipc.open_stream(source)
batches = reader # this reader is iterable
table = pa.Table.from_batches(batches)
return from_table(table)
|
def open(path, fs_options):
source = vaex.file.open_for_arrow(
path=path, mode="rb", fs_options=fs_options, mmap=True
)
try:
# first we try if it opens as stream
reader = pa.ipc.open_stream(source)
except pa.lib.ArrowInvalid:
# if not, we open as file
reader = pa.ipc.open_file(source)
# for some reason this reader is not iterable
batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]
else:
# if a stream, we're good
batches = reader # this reader is iterable
table = pa.Table.from_batches(batches)
return from_table(table)
|
https://github.com/vaexio/vaex/issues/786
|
pyarrow.ipc.open_stream("feat.arrow")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/me/.local/lib/python3.8/site-packages/pyarrow/ipc.py", line 146, in open_stream
return RecordBatchStreamReader(source)
File "/home/me/.local/lib/python3.8/site-packages/pyarrow/ipc.py", line 62, in __init__
self._open(source)
File "pyarrow/ipc.pxi", line 360, in pyarrow.lib._RecordBatchStreamReader._open
File "pyarrow/error.pxi", line 123, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 100, in pyarrow.lib.check_status
OSError: Invalid flatbuffers message.
|
OSError
|
def export_arrow(
self,
to,
progress=None,
chunk_size=default_chunk_size,
parallel=True,
reduce_large=True,
fs_options=None,
as_stream=True,
):
"""Exports the DataFrame to a file of stream written with arrow
:param to: filename, file object, or :py:data:`pyarrow.RecordBatchStreamWriter`, py:data:`pyarrow.RecordBatchFileWriter` or :py:data:`pyarrow.parquet.ParquetWriter`
:param progress: {progress}
:param int chunk_size: {chunk_size_export}
:param bool parallel: {evaluate_parallel}
:param bool reduce_large: If True, convert arrow large_string type to string type
:param dict fs_options: Coming soon...
:param bool as_stream: Write as an Arrow stream if true, else a file.
see also https://arrow.apache.org/docs/format/Columnar.html?highlight=arrow1#ipc-file-format
:return:
"""
progressbar = vaex.utils.progressbars(progress)
def write(writer):
progressbar(0)
N = len(self)
if chunk_size:
for i1, i2, table in self.to_arrow_table(
chunk_size=chunk_size, parallel=parallel, reduce_large=reduce_large
):
writer.write_table(table)
progressbar(i2 / N)
progressbar(1.0)
else:
table = self.to_arrow_table(
chunk_size=chunk_size, parallel=parallel, reduce_large=reduce_large
)
writer.write_table(table)
if isinstance(to, str):
schema = (
self[0:1].to_arrow_table(parallel=False, reduce_large=reduce_large).schema
)
fs_options = fs_options or {}
with vaex.file.open_for_arrow(
path=to, mode="wb", fs_options=fs_options
) as sink:
if as_stream:
with pa.RecordBatchStreamWriter(sink, schema) as writer:
write(writer)
else:
with pa.RecordBatchFileWriter(sink, schema) as writer:
write(writer)
else:
write(to)
|
def export_arrow(
self,
to,
progress=None,
chunk_size=default_chunk_size,
parallel=True,
reduce_large=True,
fs_options=None,
):
"""Exports the DataFrame to a file of stream written with arrow
:param to: filename, file object, or :py:data:`pyarrow.RecordBatchStreamWriter`, py:data:`pyarrow.RecordBatchFileWriter` or :py:data:`pyarrow.parquet.ParquetWriter`
:param progress: {progress}
:param int chunk_size: {chunk_size_export}
:param bool parallel: {evaluate_parallel}
:param bool reduce_large: If True, convert arrow large_string type to string type
:param dict fs_options: Coming soon...
:return:
"""
progressbar = vaex.utils.progressbars(progress)
def write(writer):
progressbar(0)
N = len(self)
if chunk_size:
for i1, i2, table in self.to_arrow_table(
chunk_size=chunk_size, parallel=parallel, reduce_large=reduce_large
):
writer.write_table(table)
progressbar(i2 / N)
progressbar(1.0)
else:
table = self.to_arrow_table(
chunk_size=chunk_size, parallel=parallel, reduce_large=reduce_large
)
writer.write_table(table)
if isinstance(to, str):
schema = (
self[0:1].to_arrow_table(parallel=False, reduce_large=reduce_large).schema
)
fs_options = fs_options or {}
with vaex.file.open_for_arrow(
path=to, mode="wb", fs_options=fs_options
) as sink:
writer = pa.RecordBatchStreamWriter(sink, schema)
write(writer)
else:
write(to)
|
https://github.com/vaexio/vaex/issues/786
|
pyarrow.ipc.open_stream("feat.arrow")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/me/.local/lib/python3.8/site-packages/pyarrow/ipc.py", line 146, in open_stream
return RecordBatchStreamReader(source)
File "/home/me/.local/lib/python3.8/site-packages/pyarrow/ipc.py", line 62, in __init__
self._open(source)
File "pyarrow/ipc.pxi", line 360, in pyarrow.lib._RecordBatchStreamReader._open
File "pyarrow/error.pxi", line 123, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 100, in pyarrow.lib.check_status
OSError: Invalid flatbuffers message.
|
OSError
|
def validate_expression(expr, variable_set, function_set=[], names=None):
global last_func
names = names if names is not None else []
if isinstance(expr, six.string_types):
node = ast.parse(expr)
if len(node.body) != 1:
raise ValueError("expected one expression, got %r" % len(node.body))
first_expr = node.body[0]
if not isinstance(first_expr, _ast.Expr):
raise ValueError("expected an expression got a %r" % type(node.body))
validate_expression(first_expr.value, variable_set, function_set, names)
elif isinstance(expr, _ast.BinOp):
if expr.op.__class__ in valid_binary_operators:
validate_expression(expr.right, variable_set, function_set, names)
validate_expression(expr.left, variable_set, function_set, names)
else:
raise ValueError("Binary operator not allowed: %r" % expr.op)
elif isinstance(expr, _ast.UnaryOp):
if expr.op.__class__ in valid_unary_operators:
validate_expression(expr.operand, variable_set, function_set, names)
else:
raise ValueError("Unary operator not allowed: %r" % expr.op)
elif isinstance(expr, _ast.Name):
if expr.id not in variable_set:
matches = difflib.get_close_matches(expr.id, list(variable_set))
msg = "Column or variable %r does not exist." % expr.id
if matches:
msg += " Did you mean: " + " or ".join(map(repr, matches))
raise NameError(msg)
names.append(expr.id)
elif isinstance(expr, ast_Num):
pass # numbers are fine
elif isinstance(expr, ast_Str):
pass # as well as strings
elif isinstance(expr, _ast.Call):
validate_func(expr.func, function_set)
last_func = expr
for arg in expr.args:
validate_expression(arg, variable_set, function_set, names)
for arg in expr.keywords:
validate_expression(arg, variable_set, function_set, names)
elif isinstance(expr, _ast.Compare):
validate_expression(expr.left, variable_set, function_set, names)
for op in expr.ops:
if op.__class__ not in valid_compare_operators:
raise ValueError("Compare operator not allowed: %r" % op)
for comparator in expr.comparators:
validate_expression(comparator, variable_set, function_set, names)
elif isinstance(expr, _ast.keyword):
validate_expression(expr.value, variable_set, function_set, names)
elif isinstance(expr, ast_Constant):
pass # like True and False
elif isinstance(expr, _ast.Subscript):
validate_expression(expr.value, variable_set, function_set, names)
if isinstance(expr.slice.value, ast_Num):
pass # numbers are fine
elif isinstance(expr.slice.value, _ast.Str):
pass # and strings
else:
raise ValueError(
"Only subscript/slices with numbers allowed, not: %r" % expr.slice.value
)
else:
last_func = expr
raise ValueError("Unknown expression type: %r" % type(expr))
|
def validate_expression(expr, variable_set, function_set=[], names=None):
global last_func
names = names if names is not None else []
if isinstance(expr, six.string_types):
node = ast.parse(expr)
if len(node.body) != 1:
raise ValueError("expected one expression, got %r" % len(node.body))
first_expr = node.body[0]
if not isinstance(first_expr, _ast.Expr):
raise ValueError("expected an expression got a %r" % type(node.body))
validate_expression(first_expr.value, variable_set, function_set, names)
elif isinstance(expr, _ast.BinOp):
if expr.op.__class__ in valid_binary_operators:
validate_expression(expr.right, variable_set, function_set, names)
validate_expression(expr.left, variable_set, function_set, names)
else:
raise ValueError("Binary operator not allowed: %r" % expr.op)
elif isinstance(expr, _ast.UnaryOp):
if expr.op.__class__ in valid_unary_operators:
validate_expression(expr.operand, variable_set, function_set, names)
else:
raise ValueError("Unary operator not allowed: %r" % expr.op)
elif isinstance(expr, _ast.Name):
validate_id(expr.id)
if expr.id not in variable_set:
matches = difflib.get_close_matches(expr.id, list(variable_set))
msg = "Column or variable %r does not exist." % expr.id
if matches:
msg += " Did you mean: " + " or ".join(map(repr, matches))
raise NameError(msg)
names.append(expr.id)
elif isinstance(expr, ast_Num):
pass # numbers are fine
elif isinstance(expr, ast_Str):
pass # as well as strings
elif isinstance(expr, _ast.Call):
validate_func(expr.func, function_set)
last_func = expr
for arg in expr.args:
validate_expression(arg, variable_set, function_set, names)
for arg in expr.keywords:
validate_expression(arg, variable_set, function_set, names)
elif isinstance(expr, _ast.Compare):
validate_expression(expr.left, variable_set, function_set, names)
for op in expr.ops:
if op.__class__ not in valid_compare_operators:
raise ValueError("Compare operator not allowed: %r" % op)
for comparator in expr.comparators:
validate_expression(comparator, variable_set, function_set, names)
elif isinstance(expr, _ast.keyword):
validate_expression(expr.value, variable_set, function_set, names)
elif isinstance(expr, ast_Constant):
pass # like True and False
elif isinstance(expr, _ast.Subscript):
validate_expression(expr.value, variable_set, function_set, names)
if isinstance(expr.slice.value, ast_Num):
pass # numbers are fine
elif isinstance(expr.slice.value, _ast.Str):
pass # and strings
else:
raise ValueError(
"Only subscript/slices with numbers allowed, not: %r" % expr.slice.value
)
else:
last_func = expr
raise ValueError("Unknown expression type: %r" % type(expr))
|
https://github.com/vaexio/vaex/issues/949
|
ValueError Traceback (most recent call last)
<ipython-input-59-b91ac46b6ba4> in <module>
----> 1 df.groupby(vaex.BinnerTime(df.字段1)).agg('count')
C:\ProgramData\anaconda3\lib\site-packages\vaex\groupby.py in __init__(self, expression, resolution, df, every)
57 self.begin_name = self.df.add_variable('t_begin', self.tmin.astype(self.resolution_type), unique=True)
58 # TODO: import integer from future?
---> 59 self.binby_expression = str(self.df['%s - %s' % (self.expression.astype(self.resolution_type), self.begin_name)].astype('int') // every)
60 self.binner = self.df._binner_ordinal(self.binby_expression, self.N)
61
C:\ProgramData\anaconda3\lib\site-packages\vaex\dataframe.py in __getitem__(self, item)
4585 # if item in self._virtual_expressions:
4586 # return self._virtual_expressions[item]
-> 4587 self.validate_expression(item)
4588 return Expression(self, item) # TODO we'd like to return the same expression if possible
4589 elif isinstance(item, Expression):
C:\ProgramData\anaconda3\lib\site-packages\vaex\dataframe.py in validate_expression(self, expression)
2899 funcs = set(expression_namespace.keys()) | set(self.functions.keys())
2900 try:
-> 2901 return vaex.expresso.validate_expression(expression, vars, funcs)
2902 except NameError as e:
2903 raise NameError(str(e)) from None
C:\ProgramData\anaconda3\lib\site-packages\vaex\expresso.py in validate_expression(expr, variable_set, function_set, names)
77 type(node.body))
78 validate_expression(first_expr.value, variable_set,
---> 79 function_set, names)
80 elif isinstance(expr, _ast.BinOp):
81 if expr.op.__class__ in valid_binary_operators:
C:\ProgramData\anaconda3\lib\site-packages\vaex\expresso.py in validate_expression(expr, variable_set, function_set, names)
81 if expr.op.__class__ in valid_binary_operators:
82 validate_expression(expr.right, variable_set, function_set, names)
---> 83 validate_expression(expr.left, variable_set, function_set, names)
84 else:
85 raise ValueError("Binary operator not allowed: %r" % expr.op)
C:\ProgramData\anaconda3\lib\site-packages\vaex\expresso.py in validate_expression(expr, variable_set, function_set, names)
108 last_func = expr
109 for arg in expr.args:
--> 110 validate_expression(arg, variable_set, function_set, names)
111 for arg in expr.keywords:
112 validate_expression(arg, variable_set, function_set, names)
C:\ProgramData\anaconda3\lib\site-packages\vaex\expresso.py in validate_expression(expr, variable_set, function_set, names)
91 raise ValueError("Unary operator not allowed: %r" % expr.op)
92 elif isinstance(expr, _ast.Name):
---> 93 validate_id(expr.id)
94 if expr.id not in variable_set:
95 matches = difflib.get_close_matches(expr.id, list(variable_set))
C:\ProgramData\anaconda3\lib\site-packages\vaex\expresso.py in validate_id(id)
570 for char in id:
571 if char not in valid_id_characters:
--> 572 raise ValueError("invalid character %r in id %r" % (char, id))
ValueError: invalid character '字' in id '字段1'
|
ValueError
|
def _as_table(self, i1, i2, j1=None, j2=None, format="html"):
from .formatting import _format_value
parts = [] # """<div>%s (length=%d)</div>""" % (self.name, len(self))]
parts += ["<table class='table-striped'>"]
aliases_reverse = {value: key for key, value in self._column_aliases.items()}
# we need to get the underlying names since we use df.evaluate
column_names = self.get_column_names(alias=False)
values_list = []
values_list.append(["#", []])
# parts += ["<thead><tr>"]
for name in column_names:
values_list.append([aliases_reverse.get(name, name), []])
# parts += ["<th>%s</th>" % name]
# parts += ["</tr></thead>"]
def table_part(k1, k2, parts):
N = k2 - k1
# slicing will invoke .extract which will make the evaluation
# much quicker
df = self[k1:k2]
try:
values = dict(zip(column_names, df.evaluate(column_names)))
except:
values = {}
for i, name in enumerate(column_names):
try:
values[name] = df.evaluate(name)
except:
values[name] = ["error"] * (N)
logger.exception(
"error evaluating: %s at rows %i-%i" % (name, k1, k2)
)
for i in range(k2 - k1):
# parts += ["<tr>"]
# parts += ["<td><i style='opacity: 0.6'>{:,}</i></td>".format(i + k1)]
if format == "html":
value = "<i style='opacity: 0.6'>{:,}</i>".format(i + k1)
else:
value = "{:,}".format(i + k1)
values_list[0][1].append(value)
for j, name in enumerate(column_names):
value = values[name][i]
value = _format_value(value)
values_list[j + 1][1].append(value)
# parts += ["</tr>"]
# return values_list
if i2 - i1 > 0:
parts = table_part(i1, i2, parts)
if j1 is not None and j2 is not None:
values_list[0][1].append("...")
for i in range(len(column_names)):
# parts += ["<td>...</td>"]
values_list[i + 1][1].append("...")
# parts = table_part(j1, j2, parts)
table_part(j1, j2, parts)
else:
for header, values in values_list:
values.append(None)
# parts += "</table>"
# html = "".join(parts)
# return html
values_list = dict(values_list)
# print(values_list)
import tabulate
table_text = tabulate.tabulate(values_list, headers="keys", tablefmt=format)
if i2 - i1 == 0:
if self._length_unfiltered != len(self):
footer_text = "No rows to display (because of filtering)."
else:
footer_text = "No rows to display."
if format == "html":
table_text += f"<i>{footer_text}</i>"
if format == "plain":
table_text += f"\n{footer_text}"
return table_text
|
def _as_table(self, i1, i2, j1=None, j2=None, format="html"):
from .formatting import _format_value
parts = [] # """<div>%s (length=%d)</div>""" % (self.name, len(self))]
parts += ["<table class='table-striped'>"]
aliases_reverse = {value: key for key, value in self._column_aliases.items()}
column_names = self.get_column_names()
values_list = []
values_list.append(["#", []])
# parts += ["<thead><tr>"]
for name in column_names:
values_list.append([aliases_reverse.get(name, name), []])
# parts += ["<th>%s</th>" % name]
# parts += ["</tr></thead>"]
def table_part(k1, k2, parts):
N = k2 - k1
# slicing will invoke .extract which will make the evaluation
# much quicker
df = self[k1:k2]
try:
values = dict(zip(column_names, df.evaluate(column_names)))
except:
values = {}
for i, name in enumerate(column_names):
try:
values[name] = df.evaluate(name)
except:
values[name] = ["error"] * (N)
logger.exception(
"error evaluating: %s at rows %i-%i" % (name, k1, k2)
)
for i in range(k2 - k1):
# parts += ["<tr>"]
# parts += ["<td><i style='opacity: 0.6'>{:,}</i></td>".format(i + k1)]
if format == "html":
value = "<i style='opacity: 0.6'>{:,}</i>".format(i + k1)
else:
value = "{:,}".format(i + k1)
values_list[0][1].append(value)
for j, name in enumerate(column_names):
value = values[name][i]
value = _format_value(value)
values_list[j + 1][1].append(value)
# parts += ["</tr>"]
# return values_list
if i2 - i1 > 0:
parts = table_part(i1, i2, parts)
if j1 is not None and j2 is not None:
values_list[0][1].append("...")
for i in range(len(column_names)):
# parts += ["<td>...</td>"]
values_list[i + 1][1].append("...")
# parts = table_part(j1, j2, parts)
table_part(j1, j2, parts)
else:
for header, values in values_list:
values.append(None)
# parts += "</table>"
# html = "".join(parts)
# return html
values_list = dict(values_list)
# print(values_list)
import tabulate
table_text = tabulate.tabulate(values_list, headers="keys", tablefmt=format)
if i2 - i1 == 0:
if self._length_unfiltered != len(self):
footer_text = "No rows to display (because of filtering)."
else:
footer_text = "No rows to display."
if format == "html":
table_text += f"<i>{footer_text}</i>"
if format == "plain":
table_text += f"\n{footer_text}"
return table_text
|
https://github.com/vaexio/vaex/issues/378
|
could not convert column 0, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 0, error: TypeError('getattr(): attribute name must be string',)
could not convert column 1, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 1, error: TypeError('getattr(): attribute name must be string',)
could not convert column 2, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 2, error: TypeError('getattr(): attribute name must be string',)
could not convert column 3, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 3, error: TypeError('getattr(): attribute name must be string',)
could not convert column 4, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 4, error: TypeError('getattr(): attribute name must be string',)
could not convert column 5, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 5, error: TypeError('getattr(): attribute name must be string',)
could not convert column 6, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 6, error: TypeError('getattr(): attribute name must be string',)
could not convert column 7, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 7, error: TypeError('getattr(): attribute name must be string',)
could not convert column 8, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 8, error: TypeError('getattr(): attribute name must be string',)
could not convert column 9, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 9, error: TypeError('getattr(): attribute name must be string',)
could not convert column 10, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 10, error: TypeError('getattr(): attribute name must be string',)
could not convert column 11, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 11, error: TypeError('getattr(): attribute name must be string',)
could not convert column 12, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 12, error: TypeError('getattr(): attribute name must be string',)
could not convert column 13, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 13, error: TypeError('getattr(): attribute name must be string',)
could not convert column 14, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 14, error: TypeError('getattr(): attribute name must be string',)
could not convert column 15, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 15, error: TypeError('getattr(): attribute name must be string',)
could not convert column 16, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 16, error: TypeError('getattr(): attribute name must be string',)
could not convert column 17, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 17, error: TypeError('getattr(): attribute name must be string',)
could not convert column 18, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 18, error: TypeError('getattr(): attribute name must be string',)
could not convert column 19, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 19, error: TypeError('getattr(): attribute name must be string',)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in __call__(self, obj, include, exclude)
968 d['include'] = include
969 d['exclude'] = exclude
--> 970 return method(**d)
971 return None
972 else:
5 frames
/usr/local/lib/python3.6/dist-packages/vaex/dataframe.py in column_filter(name)
3486 if not strings and (self.dtype(name) == str_type or self.dtype(name).type == np.string_):
3487 return False
-> 3488 if not hidden and name.startswith('__'):
3489 return False
3490 return True
AttributeError: 'int' object has no attribute 'startswith'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-a7b44e392414> in <module>()
3 df = pd.DataFrame(X)
4 df['target'] = y
----> 5 vaex.from_pandas(df)
2 frames
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in format(self, obj, include, exclude)
150 return {}, {}
151
--> 152 format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
153
154 if format_dict or md_dict:
TypeError: 'NoneType' object is not iterable
|
AttributeError
|
def get_column_names(
self, virtual=True, strings=True, hidden=False, regex=None, alias=True
):
"""Return a list of column names
Example:
>>> import vaex
>>> df = vaex.from_scalars(x=1, x2=2, y=3, s='string')
>>> df['r'] = (df.x**2 + df.y**2)**2
>>> df.get_column_names()
['x', 'x2', 'y', 's', 'r']
>>> df.get_column_names(virtual=False)
['x', 'x2', 'y', 's']
>>> df.get_column_names(regex='x.*')
['x', 'x2']
:param virtual: If False, skip virtual columns
:param hidden: If False, skip hidden columns
:param strings: If False, skip string columns
:param regex: Only return column names matching the (optional) regular expression
:param alias: Return the alias (True) or internal name (False).
:rtype: list of str
"""
aliases_reverse = (
{value: key for key, value in self._column_aliases.items()} if alias else {}
)
def column_filter(name):
"""Return True if column with specified name should be returned"""
if regex and not re.match(regex, name):
return False
if not virtual and name in self.virtual_columns:
return False
if not strings and (
self.dtype(name) == str_type or self.dtype(name).type == np.string_
):
return False
if not hidden and name.startswith("__"):
return False
return True
if hidden and virtual and regex is None and not alias:
return list(self.column_names) # quick path
if not hidden and virtual and regex is None and not alias:
return [
k for k in self.column_names if not k.startswith("__")
] # also a quick path
return [
aliases_reverse.get(name, name)
for name in self.column_names
if column_filter(name)
]
|
def get_column_names(self, virtual=True, strings=True, hidden=False, regex=None):
"""Return a list of column names
Example:
>>> import vaex
>>> df = vaex.from_scalars(x=1, x2=2, y=3, s='string')
>>> df['r'] = (df.x**2 + df.y**2)**2
>>> df.get_column_names()
['x', 'x2', 'y', 's', 'r']
>>> df.get_column_names(virtual=False)
['x', 'x2', 'y', 's']
>>> df.get_column_names(regex='x.*')
['x', 'x2']
:param virtual: If False, skip virtual columns
:param hidden: If False, skip hidden columns
:param strings: If False, skip string columns
:param regex: Only return column names matching the (optional) regular expression
:rtype: list of str
Example:
>>> import vaex
>>> df = vaex.from_scalars(x=1, x2=2, y=3, s='string')
>>> df['r'] = (df.x**2 + df.y**2)**2
>>> df.get_column_names()
['x', 'x2', 'y', 's', 'r']
>>> df.get_column_names(virtual=False)
['x', 'x2', 'y', 's']
>>> df.get_column_names(regex='x.*')
['x', 'x2']
"""
def column_filter(name):
"""Return True if column with specified name should be returned"""
if regex and not re.match(regex, name):
return False
if not virtual and name in self.virtual_columns:
return False
if not strings and (
self.dtype(name) == str_type or self.dtype(name).type == np.string_
):
return False
if not hidden and name.startswith("__"):
return False
return True
if hidden and virtual and regex is None:
return list(self.column_names) # quick path
if not hidden and virtual and regex is None:
return [
k for k in self.column_names if not k.startswith("__")
] # also a quick path
return [name for name in self.column_names if column_filter(name)]
|
https://github.com/vaexio/vaex/issues/378
|
could not convert column 0, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 0, error: TypeError('getattr(): attribute name must be string',)
could not convert column 1, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 1, error: TypeError('getattr(): attribute name must be string',)
could not convert column 2, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 2, error: TypeError('getattr(): attribute name must be string',)
could not convert column 3, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 3, error: TypeError('getattr(): attribute name must be string',)
could not convert column 4, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 4, error: TypeError('getattr(): attribute name must be string',)
could not convert column 5, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 5, error: TypeError('getattr(): attribute name must be string',)
could not convert column 6, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 6, error: TypeError('getattr(): attribute name must be string',)
could not convert column 7, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 7, error: TypeError('getattr(): attribute name must be string',)
could not convert column 8, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 8, error: TypeError('getattr(): attribute name must be string',)
could not convert column 9, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 9, error: TypeError('getattr(): attribute name must be string',)
could not convert column 10, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 10, error: TypeError('getattr(): attribute name must be string',)
could not convert column 11, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 11, error: TypeError('getattr(): attribute name must be string',)
could not convert column 12, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 12, error: TypeError('getattr(): attribute name must be string',)
could not convert column 13, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 13, error: TypeError('getattr(): attribute name must be string',)
could not convert column 14, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 14, error: TypeError('getattr(): attribute name must be string',)
could not convert column 15, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 15, error: TypeError('getattr(): attribute name must be string',)
could not convert column 16, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 16, error: TypeError('getattr(): attribute name must be string',)
could not convert column 17, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 17, error: TypeError('getattr(): attribute name must be string',)
could not convert column 18, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 18, error: TypeError('getattr(): attribute name must be string',)
could not convert column 19, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 19, error: TypeError('getattr(): attribute name must be string',)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in __call__(self, obj, include, exclude)
968 d['include'] = include
969 d['exclude'] = exclude
--> 970 return method(**d)
971 return None
972 else:
5 frames
/usr/local/lib/python3.6/dist-packages/vaex/dataframe.py in column_filter(name)
3486 if not strings and (self.dtype(name) == str_type or self.dtype(name).type == np.string_):
3487 return False
-> 3488 if not hidden and name.startswith('__'):
3489 return False
3490 return True
AttributeError: 'int' object has no attribute 'startswith'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-a7b44e392414> in <module>()
3 df = pd.DataFrame(X)
4 df['target'] = y
----> 5 vaex.from_pandas(df)
2 frames
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in format(self, obj, include, exclude)
150 return {}, {}
151
--> 152 format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
153
154 if format_dict or md_dict:
TypeError: 'NoneType' object is not iterable
|
AttributeError
|
def copy(self, column_names=None, virtual=True):
df = DataFrameArrays()
df._length_unfiltered = self._length_unfiltered
df._length_original = self._length_original
df._cached_filtered_length = self._cached_filtered_length
df._index_end = self._index_end
df._index_start = self._index_start
df._active_fraction = self._active_fraction
df._renamed_columns = list(self._renamed_columns)
df._column_aliases = dict(self._column_aliases)
df.units.update(self.units)
df.variables.update(self.variables) # we add all, could maybe only copy used
df._categories.update(self._categories)
if column_names is None:
column_names = self.get_column_names(hidden=True, alias=False)
all_column_names = self.get_column_names(hidden=True, alias=False)
# put in the selections (thus filters) in place
# so drop moves instead of really dropping it
df.functions.update(self.functions)
for key, value in self.selection_histories.items():
# TODO: selection_histories begin a defaultdict always gives
# us the filtered selection, so check if we really have a
# selection
if self.get_selection(key):
df.selection_histories[key] = list(value)
# the filter should never be modified, so we can share a reference
# except when we add filter on filter using
# df = df[df.x>0]
# df = df[df.x < 10]
# in that case we make a copy in __getitem__
if key == FILTER_SELECTION_NAME:
df._selection_masks[key] = self._selection_masks[key]
else:
df._selection_masks[key] = vaex.superutils.Mask(df._length_original)
# and make sure the mask is consistent with the cache chunks
np.asarray(df._selection_masks[key])[:] = np.asarray(
self._selection_masks[key]
)
for key, value in self.selection_history_indices.items():
if self.get_selection(key):
df.selection_history_indices[key] = value
# we can also copy the caches, which prevents recomputations of selections
df._selection_mask_caches[key] = collections.defaultdict(dict)
df._selection_mask_caches[key].update(self._selection_mask_caches[key])
if 1:
# print("-----", column_names)
depending = set()
added = set()
for name in column_names:
# print("add", name)
added.add(name)
if name in self.columns:
column = self.columns[name]
df.add_column(name, column, dtype=self._dtypes_override.get(name))
if isinstance(column, ColumnSparse):
df._sparse_matrices[name] = self._sparse_matrices[name]
elif name in self.virtual_columns:
if virtual: # TODO: check if the ast is cached
df.add_virtual_column(name, self.virtual_columns[name])
deps = [
key
for key, value in df._virtual_expressions[
name
].ast_names.items()
]
# print("add virtual", name, df._virtual_expressions[name].expression, deps)
depending.update(deps)
else:
# this might be an expression, create a valid name
self.validate_expression(name)
expression = name
name = vaex.utils.find_valid_name(name)
# add the expression
df[name] = df._expr(expression)
# then get the dependencies
deps = [
key
for key, value in df._virtual_expressions[name].ast_names.items()
]
depending.update(deps)
# print(depending, "after add")
# depending |= column_names
# print(depending)
# print(depending, "before filter")
if self.filtered:
selection = self.get_selection(FILTER_SELECTION_NAME)
depending |= selection._depending_columns(self)
depending.difference_update(added) # remove already added
# print(depending, "after filter")
# return depending_columns
hide = []
while depending:
new_depending = set()
for name in depending:
added.add(name)
if name in self.columns:
# print("add column", name)
df.add_column(
name, self.columns[name], dtype=self._dtypes_override.get(name)
)
# print("and hide it")
# df._hide_column(name)
hide.append(name)
elif name in self.virtual_columns:
if virtual: # TODO: check if the ast is cached
df.add_virtual_column(name, self.virtual_columns[name])
deps = [
key
for key, value in self._virtual_expressions[
name
].ast_names.items()
]
new_depending.update(deps)
# df._hide_column(name)
hide.append(name)
elif name in self.variables:
# if must be a variables?
# TODO: what if the variable depends on other variables
# we already add all variables
# df.add_variable(name, self.variables[name])
pass
# print("new_depending", new_depending)
new_depending.difference_update(added)
depending = new_depending
for name in hide:
df._hide_column(name)
else:
# we copy all columns, but drop the ones that are not wanted
# this makes sure that needed columns are hidden instead
def add_columns(columns):
for name in columns:
if name in self.columns:
df.add_column(
name, self.columns[name], dtype=self._dtypes_override.get(name)
)
elif name in self.virtual_columns:
if virtual:
df.add_virtual_column(name, self.virtual_columns[name])
else:
# this might be an expression, create a valid name
expression = name
name = vaex.utils.find_valid_name(name)
df[name] = df._expr(expression)
# to preserve the order, we first add the ones we want, then the rest
add_columns(column_names)
# then the rest
rest = set(all_column_names) - set(column_names)
add_columns(rest)
# and remove them
for name in rest:
# if the column should not have been added, drop it. This checks if columns need
# to be hidden instead, and expressions be rewritten.
if name not in column_names:
df.drop(name, inplace=True)
assert name not in df.get_column_names(hidden=True)
df.copy_metadata(self)
return df
|
def copy(self, column_names=None, virtual=True):
df = DataFrameArrays()
df._length_unfiltered = self._length_unfiltered
df._length_original = self._length_original
df._cached_filtered_length = self._cached_filtered_length
df._index_end = self._index_end
df._index_start = self._index_start
df._active_fraction = self._active_fraction
df._renamed_columns = list(self._renamed_columns)
df._column_aliases = dict(self._column_aliases)
df.units.update(self.units)
df.variables.update(self.variables) # we add all, could maybe only copy used
df._categories.update(self._categories)
if column_names is None:
column_names = self.get_column_names(hidden=True)
all_column_names = self.get_column_names(hidden=True)
# put in the selections (thus filters) in place
# so drop moves instead of really dropping it
df.functions.update(self.functions)
for key, value in self.selection_histories.items():
# TODO: selection_histories begin a defaultdict always gives
# us the filtered selection, so check if we really have a
# selection
if self.get_selection(key):
df.selection_histories[key] = list(value)
# the filter should never be modified, so we can share a reference
# except when we add filter on filter using
# df = df[df.x>0]
# df = df[df.x < 10]
# in that case we make a copy in __getitem__
if key == FILTER_SELECTION_NAME:
df._selection_masks[key] = self._selection_masks[key]
else:
df._selection_masks[key] = vaex.superutils.Mask(df._length_original)
# and make sure the mask is consistent with the cache chunks
np.asarray(df._selection_masks[key])[:] = np.asarray(
self._selection_masks[key]
)
for key, value in self.selection_history_indices.items():
if self.get_selection(key):
df.selection_history_indices[key] = value
# we can also copy the caches, which prevents recomputations of selections
df._selection_mask_caches[key] = collections.defaultdict(dict)
df._selection_mask_caches[key].update(self._selection_mask_caches[key])
if 1:
# print("-----", column_names)
depending = set()
added = set()
for name in column_names:
# print("add", name)
added.add(name)
if name in self.columns:
column = self.columns[name]
df.add_column(name, column, dtype=self._dtypes_override.get(name))
if isinstance(column, ColumnSparse):
df._sparse_matrices[name] = self._sparse_matrices[name]
elif name in self.virtual_columns:
if virtual: # TODO: check if the ast is cached
df.add_virtual_column(name, self.virtual_columns[name])
deps = [
key
for key, value in df._virtual_expressions[
name
].ast_names.items()
]
# print("add virtual", name, df._virtual_expressions[name].expression, deps)
depending.update(deps)
else:
# this might be an expression, create a valid name
self.validate_expression(name)
expression = name
name = vaex.utils.find_valid_name(name)
# add the expression
df[name] = df._expr(expression)
# then get the dependencies
deps = [
key
for key, value in df._virtual_expressions[name].ast_names.items()
]
depending.update(deps)
# print(depending, "after add")
# depending |= column_names
# print(depending)
# print(depending, "before filter")
if self.filtered:
selection = self.get_selection(FILTER_SELECTION_NAME)
depending |= selection._depending_columns(self)
depending.difference_update(added) # remove already added
# print(depending, "after filter")
# return depending_columns
hide = []
while depending:
new_depending = set()
for name in depending:
added.add(name)
if name in self.columns:
# print("add column", name)
df.add_column(
name, self.columns[name], dtype=self._dtypes_override.get(name)
)
# print("and hide it")
# df._hide_column(name)
hide.append(name)
elif name in self.virtual_columns:
if virtual: # TODO: check if the ast is cached
df.add_virtual_column(name, self.virtual_columns[name])
deps = [
key
for key, value in self._virtual_expressions[
name
].ast_names.items()
]
new_depending.update(deps)
# df._hide_column(name)
hide.append(name)
elif name in self.variables:
# if must be a variables?
# TODO: what if the variable depends on other variables
# we already add all variables
# df.add_variable(name, self.variables[name])
pass
# print("new_depending", new_depending)
new_depending.difference_update(added)
depending = new_depending
for name in hide:
df._hide_column(name)
else:
# we copy all columns, but drop the ones that are not wanted
# this makes sure that needed columns are hidden instead
def add_columns(columns):
for name in columns:
if name in self.columns:
df.add_column(
name, self.columns[name], dtype=self._dtypes_override.get(name)
)
elif name in self.virtual_columns:
if virtual:
df.add_virtual_column(name, self.virtual_columns[name])
else:
# this might be an expression, create a valid name
expression = name
name = vaex.utils.find_valid_name(name)
df[name] = df._expr(expression)
# to preserve the order, we first add the ones we want, then the rest
add_columns(column_names)
# then the rest
rest = set(all_column_names) - set(column_names)
add_columns(rest)
# and remove them
for name in rest:
# if the column should not have been added, drop it. This checks if columns need
# to be hidden instead, and expressions be rewritten.
if name not in column_names:
df.drop(name, inplace=True)
assert name not in df.get_column_names(hidden=True)
df.copy_metadata(self)
return df
|
https://github.com/vaexio/vaex/issues/378
|
could not convert column 0, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 0, error: TypeError('getattr(): attribute name must be string',)
could not convert column 1, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 1, error: TypeError('getattr(): attribute name must be string',)
could not convert column 2, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 2, error: TypeError('getattr(): attribute name must be string',)
could not convert column 3, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 3, error: TypeError('getattr(): attribute name must be string',)
could not convert column 4, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 4, error: TypeError('getattr(): attribute name must be string',)
could not convert column 5, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 5, error: TypeError('getattr(): attribute name must be string',)
could not convert column 6, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 6, error: TypeError('getattr(): attribute name must be string',)
could not convert column 7, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 7, error: TypeError('getattr(): attribute name must be string',)
could not convert column 8, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 8, error: TypeError('getattr(): attribute name must be string',)
could not convert column 9, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 9, error: TypeError('getattr(): attribute name must be string',)
could not convert column 10, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 10, error: TypeError('getattr(): attribute name must be string',)
could not convert column 11, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 11, error: TypeError('getattr(): attribute name must be string',)
could not convert column 12, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 12, error: TypeError('getattr(): attribute name must be string',)
could not convert column 13, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 13, error: TypeError('getattr(): attribute name must be string',)
could not convert column 14, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 14, error: TypeError('getattr(): attribute name must be string',)
could not convert column 15, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 15, error: TypeError('getattr(): attribute name must be string',)
could not convert column 16, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 16, error: TypeError('getattr(): attribute name must be string',)
could not convert column 17, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 17, error: TypeError('getattr(): attribute name must be string',)
could not convert column 18, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 18, error: TypeError('getattr(): attribute name must be string',)
could not convert column 19, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 19, error: TypeError('getattr(): attribute name must be string',)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in __call__(self, obj, include, exclude)
968 d['include'] = include
969 d['exclude'] = exclude
--> 970 return method(**d)
971 return None
972 else:
5 frames
/usr/local/lib/python3.6/dist-packages/vaex/dataframe.py in column_filter(name)
3486 if not strings and (self.dtype(name) == str_type or self.dtype(name).type == np.string_):
3487 return False
-> 3488 if not hidden and name.startswith('__'):
3489 return False
3490 return True
AttributeError: 'int' object has no attribute 'startswith'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-a7b44e392414> in <module>()
3 df = pd.DataFrame(X)
4 df['target'] = y
----> 5 vaex.from_pandas(df)
2 frames
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in format(self, obj, include, exclude)
150 return {}, {}
151
--> 152 format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
153
154 if format_dict or md_dict:
TypeError: 'NoneType' object is not iterable
|
AttributeError
|
def export_hdf5(
dataset,
path,
column_names=None,
byteorder="=",
shuffle=False,
selection=False,
progress=None,
virtual=True,
sort=None,
ascending=True,
):
"""
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:return:
"""
if selection:
if selection == True: # easier to work with the name
selection = "default"
# first open file using h5py api
with h5py.File(path, "w") as h5file_output:
h5table_output = h5file_output.require_group("/table")
h5table_output.attrs["type"] = "table"
h5columns_output = h5file_output.require_group("/table/columns")
# i1, i2 = dataset.current_slice
N = len(dataset) if not selection else dataset.selected_length(selection)
if N == 0:
raise ValueError("Cannot export empty table")
logger.debug("virtual=%r", virtual)
logger.debug("exporting %d rows to file %s" % (N, path))
# column_names = column_names or (dataset.get_column_names() + (list(dataset.virtual_columns.keys()) if virtual else []))
column_names = column_names or dataset.get_column_names(
virtual=virtual, strings=True, alias=False
)
logger.debug("exporting columns(hdf5): %r" % column_names)
sparse_groups = collections.defaultdict(list)
sparse_matrices = {} # alternative to a set of matrices, since they are not hashable
for column_name in list(column_names):
sparse_matrix = dataset._sparse_matrix(column_name)
if sparse_matrix is not None:
# sparse columns are stored differently
sparse_groups[id(sparse_matrix)].append(column_name)
sparse_matrices[id(sparse_matrix)] = sparse_matrix
continue
dtype = dataset.dtype(column_name)
if column_name in dataset.get_column_names(virtual=False, alias=False):
column = dataset.columns[column_name]
shape = (N,) + column.shape[1:]
else:
shape = (N,)
h5column_output = h5columns_output.require_group(column_name)
if dtype == str_type:
# TODO: if no selection or filter, we could do this
# if isinstance(column, ColumnStringArrow):
# data_shape = column.bytes.shape
# indices_shape = column.indices.shape
# else:
byte_length = (
dataset[column_name].str.byte_length().sum(selection=selection)
)
if byte_length > max_int32:
dtype_indices = "i8"
else:
dtype_indices = "i4"
data_shape = (byte_length,)
indices_shape = (N + 1,)
array = h5column_output.require_dataset(
"data", shape=data_shape, dtype="S1"
)
if byte_length > 0:
array[0] = array[0] # make sure the array really exists
index_array = h5column_output.require_dataset(
"indices", shape=indices_shape, dtype=dtype_indices
)
index_array[0] = index_array[0] # make sure the array really exists
null_value_count = N - dataset.count(column_name, selection=selection)
if null_value_count > 0:
null_shape = ((N + 7) // 8,) # TODO: arrow requires padding right?
null_bitmap_array = h5column_output.require_dataset(
"null_bitmap", shape=null_shape, dtype="u1"
)
null_bitmap_array[0] = null_bitmap_array[
0
] # make sure the array really exists
array.attrs["dtype"] = "str"
# TODO: masked support ala arrow?
else:
if dtype.kind in "mM":
array = h5column_output.require_dataset(
"data", shape=shape, dtype=np.int64
)
array.attrs["dtype"] = dtype.name
elif dtype.kind == "U":
# numpy uses utf32 for unicode
char_length = dtype.itemsize // 4
shape = (N, char_length)
array = h5column_output.require_dataset(
"data", shape=shape, dtype=np.uint8
)
array.attrs["dtype"] = "utf32"
array.attrs["dlength"] = char_length
else:
try:
array = h5column_output.require_dataset(
"data", shape=shape, dtype=dtype.newbyteorder(byteorder)
)
except:
logging.exception(
"error creating dataset for %r, with type %r "
% (column_name, dtype)
)
del h5columns_output[column_name]
column_names.remove(column_name)
array[0] = array[0] # make sure the array really exists
data = dataset.evaluate(column_name, 0, 1, parallel=False)
if np.ma.isMaskedArray(data):
mask = h5column_output.require_dataset(
"mask", shape=shape, dtype=np.bool
)
mask[0] = mask[0] # make sure the array really exists
random_index_name = None
column_order = list(column_names) # copy
if shuffle:
random_index_name = "random_index"
while random_index_name in dataset.get_column_names():
random_index_name += "_new"
shuffle_array = h5columns_output.require_dataset(
random_index_name + "/data", shape=(N,), dtype=byteorder + "i8"
)
shuffle_array[0] = shuffle_array[0]
column_order.append(random_index_name) # last item
h5columns_output.attrs["column_order"] = ",".join(
column_order
) # keep track or the ordering of columns
sparse_index = 0
for sparse_matrix in sparse_matrices.values():
columns = sorted(
sparse_groups[id(sparse_matrix)],
key=lambda col: dataset.columns[col].column_index,
)
name = "sparse" + str(sparse_index)
sparse_index += 1
# TODO: slice columns
# sparse_matrix = sparse_matrix[:,]
sparse_group = h5columns_output.require_group(name)
sparse_group.attrs["type"] = "csr_matrix"
ar = sparse_group.require_dataset(
"data", shape=(len(sparse_matrix.data),), dtype=sparse_matrix.dtype
)
ar[0] = ar[0]
ar = sparse_group.require_dataset(
"indptr",
shape=(len(sparse_matrix.indptr),),
dtype=sparse_matrix.indptr.dtype,
)
ar[0] = ar[0]
ar = sparse_group.require_dataset(
"indices",
shape=(len(sparse_matrix.indices),),
dtype=sparse_matrix.indices.dtype,
)
ar[0] = ar[0]
for i, column_name in enumerate(columns):
h5column = sparse_group.require_group(column_name)
h5column.attrs["column_index"] = i
# after this the file is closed,, and reopen it using out class
dataset_output = vaex.hdf5.dataset.Hdf5MemoryMapped(path, write=True)
column_names = vaex.export._export(
dataset_input=dataset,
dataset_output=dataset_output,
path=path,
random_index_column=random_index_name,
column_names=column_names,
selection=selection,
shuffle=shuffle,
byteorder=byteorder,
progress=progress,
sort=sort,
ascending=ascending,
)
import getpass
import datetime
user = getpass.getuser()
date = str(datetime.datetime.now())
source = dataset.path
description = "file exported by vaex, by user %s, on date %s, from source %s" % (
user,
date,
source,
)
if dataset.description:
description += "previous description:\n" + dataset.description
dataset_output.copy_metadata(dataset)
dataset_output.description = description
logger.debug("writing meta information")
dataset_output.write_meta()
dataset_output.close_files()
return
|
def export_hdf5(
dataset,
path,
column_names=None,
byteorder="=",
shuffle=False,
selection=False,
progress=None,
virtual=True,
sort=None,
ascending=True,
):
"""
:param DatasetLocal dataset: dataset to export
:param str path: path for file
:param lis[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param: bool virtual: When True, export virtual columns
:return:
"""
if selection:
if selection == True: # easier to work with the name
selection = "default"
# first open file using h5py api
with h5py.File(path, "w") as h5file_output:
h5table_output = h5file_output.require_group("/table")
h5table_output.attrs["type"] = "table"
h5columns_output = h5file_output.require_group("/table/columns")
# i1, i2 = dataset.current_slice
N = len(dataset) if not selection else dataset.selected_length(selection)
if N == 0:
raise ValueError("Cannot export empty table")
logger.debug("virtual=%r", virtual)
logger.debug("exporting %d rows to file %s" % (N, path))
# column_names = column_names or (dataset.get_column_names() + (list(dataset.virtual_columns.keys()) if virtual else []))
column_names = column_names or dataset.get_column_names(
virtual=virtual, strings=True
)
logger.debug("exporting columns(hdf5): %r" % column_names)
sparse_groups = collections.defaultdict(list)
sparse_matrices = {} # alternative to a set of matrices, since they are not hashable
for column_name in list(column_names):
sparse_matrix = dataset._sparse_matrix(column_name)
if sparse_matrix is not None:
# sparse columns are stored differently
sparse_groups[id(sparse_matrix)].append(column_name)
sparse_matrices[id(sparse_matrix)] = sparse_matrix
continue
dtype = dataset.dtype(column_name)
if column_name in dataset.get_column_names(virtual=False):
column = dataset.columns[column_name]
shape = (N,) + column.shape[1:]
else:
shape = (N,)
h5column_output = h5columns_output.require_group(column_name)
if dtype == str_type:
# TODO: if no selection or filter, we could do this
# if isinstance(column, ColumnStringArrow):
# data_shape = column.bytes.shape
# indices_shape = column.indices.shape
# else:
byte_length = (
dataset[column_name].str.byte_length().sum(selection=selection)
)
if byte_length > max_int32:
dtype_indices = "i8"
else:
dtype_indices = "i4"
data_shape = (byte_length,)
indices_shape = (N + 1,)
array = h5column_output.require_dataset(
"data", shape=data_shape, dtype="S1"
)
if byte_length > 0:
array[0] = array[0] # make sure the array really exists
index_array = h5column_output.require_dataset(
"indices", shape=indices_shape, dtype=dtype_indices
)
index_array[0] = index_array[0] # make sure the array really exists
null_value_count = N - dataset.count(column_name, selection=selection)
if null_value_count > 0:
null_shape = ((N + 7) // 8,) # TODO: arrow requires padding right?
null_bitmap_array = h5column_output.require_dataset(
"null_bitmap", shape=null_shape, dtype="u1"
)
null_bitmap_array[0] = null_bitmap_array[
0
] # make sure the array really exists
array.attrs["dtype"] = "str"
# TODO: masked support ala arrow?
else:
if dtype.kind in "mM":
array = h5column_output.require_dataset(
"data", shape=shape, dtype=np.int64
)
array.attrs["dtype"] = dtype.name
elif dtype.kind == "U":
# numpy uses utf32 for unicode
char_length = dtype.itemsize // 4
shape = (N, char_length)
array = h5column_output.require_dataset(
"data", shape=shape, dtype=np.uint8
)
array.attrs["dtype"] = "utf32"
array.attrs["dlength"] = char_length
else:
try:
array = h5column_output.require_dataset(
"data", shape=shape, dtype=dtype.newbyteorder(byteorder)
)
except:
logging.exception(
"error creating dataset for %r, with type %r "
% (column_name, dtype)
)
del h5columns_output[column_name]
column_names.remove(column_name)
array[0] = array[0] # make sure the array really exists
data = dataset.evaluate(column_name, 0, 1, parallel=False)
if np.ma.isMaskedArray(data):
mask = h5column_output.require_dataset(
"mask", shape=shape, dtype=np.bool
)
mask[0] = mask[0] # make sure the array really exists
random_index_name = None
column_order = list(column_names) # copy
if shuffle:
random_index_name = "random_index"
while random_index_name in dataset.get_column_names():
random_index_name += "_new"
shuffle_array = h5columns_output.require_dataset(
random_index_name + "/data", shape=(N,), dtype=byteorder + "i8"
)
shuffle_array[0] = shuffle_array[0]
column_order.append(random_index_name) # last item
h5columns_output.attrs["column_order"] = ",".join(
column_order
) # keep track or the ordering of columns
sparse_index = 0
for sparse_matrix in sparse_matrices.values():
columns = sorted(
sparse_groups[id(sparse_matrix)],
key=lambda col: dataset.columns[col].column_index,
)
name = "sparse" + str(sparse_index)
sparse_index += 1
# TODO: slice columns
# sparse_matrix = sparse_matrix[:,]
sparse_group = h5columns_output.require_group(name)
sparse_group.attrs["type"] = "csr_matrix"
ar = sparse_group.require_dataset(
"data", shape=(len(sparse_matrix.data),), dtype=sparse_matrix.dtype
)
ar[0] = ar[0]
ar = sparse_group.require_dataset(
"indptr",
shape=(len(sparse_matrix.indptr),),
dtype=sparse_matrix.indptr.dtype,
)
ar[0] = ar[0]
ar = sparse_group.require_dataset(
"indices",
shape=(len(sparse_matrix.indices),),
dtype=sparse_matrix.indices.dtype,
)
ar[0] = ar[0]
for i, column_name in enumerate(columns):
h5column = sparse_group.require_group(column_name)
h5column.attrs["column_index"] = i
# after this the file is closed,, and reopen it using out class
dataset_output = vaex.hdf5.dataset.Hdf5MemoryMapped(path, write=True)
column_names = vaex.export._export(
dataset_input=dataset,
dataset_output=dataset_output,
path=path,
random_index_column=random_index_name,
column_names=column_names,
selection=selection,
shuffle=shuffle,
byteorder=byteorder,
progress=progress,
sort=sort,
ascending=ascending,
)
import getpass
import datetime
user = getpass.getuser()
date = str(datetime.datetime.now())
source = dataset.path
description = "file exported by vaex, by user %s, on date %s, from source %s" % (
user,
date,
source,
)
if dataset.description:
description += "previous description:\n" + dataset.description
dataset_output.copy_metadata(dataset)
dataset_output.description = description
logger.debug("writing meta information")
dataset_output.write_meta()
dataset_output.close_files()
return
|
https://github.com/vaexio/vaex/issues/378
|
could not convert column 0, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 0, error: TypeError('getattr(): attribute name must be string',)
could not convert column 1, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 1, error: TypeError('getattr(): attribute name must be string',)
could not convert column 2, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 2, error: TypeError('getattr(): attribute name must be string',)
could not convert column 3, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 3, error: TypeError('getattr(): attribute name must be string',)
could not convert column 4, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 4, error: TypeError('getattr(): attribute name must be string',)
could not convert column 5, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 5, error: TypeError('getattr(): attribute name must be string',)
could not convert column 6, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 6, error: TypeError('getattr(): attribute name must be string',)
could not convert column 7, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 7, error: TypeError('getattr(): attribute name must be string',)
could not convert column 8, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 8, error: TypeError('getattr(): attribute name must be string',)
could not convert column 9, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 9, error: TypeError('getattr(): attribute name must be string',)
could not convert column 10, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 10, error: TypeError('getattr(): attribute name must be string',)
could not convert column 11, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 11, error: TypeError('getattr(): attribute name must be string',)
could not convert column 12, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 12, error: TypeError('getattr(): attribute name must be string',)
could not convert column 13, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 13, error: TypeError('getattr(): attribute name must be string',)
could not convert column 14, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 14, error: TypeError('getattr(): attribute name must be string',)
could not convert column 15, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 15, error: TypeError('getattr(): attribute name must be string',)
could not convert column 16, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 16, error: TypeError('getattr(): attribute name must be string',)
could not convert column 17, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 17, error: TypeError('getattr(): attribute name must be string',)
could not convert column 18, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 18, error: TypeError('getattr(): attribute name must be string',)
could not convert column 19, error: TypeError('getattr(): attribute name must be string',), will try to convert it to string
Giving up column 19, error: TypeError('getattr(): attribute name must be string',)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in __call__(self, obj, include, exclude)
968 d['include'] = include
969 d['exclude'] = exclude
--> 970 return method(**d)
971 return None
972 else:
5 frames
/usr/local/lib/python3.6/dist-packages/vaex/dataframe.py in column_filter(name)
3486 if not strings and (self.dtype(name) == str_type or self.dtype(name).type == np.string_):
3487 return False
-> 3488 if not hidden and name.startswith('__'):
3489 return False
3490 return True
AttributeError: 'int' object has no attribute 'startswith'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-15-a7b44e392414> in <module>()
3 df = pd.DataFrame(X)
4 df['target'] = y
----> 5 vaex.from_pandas(df)
2 frames
/usr/local/lib/python3.6/dist-packages/IPython/core/formatters.py in format(self, obj, include, exclude)
150 return {}, {}
151
--> 152 format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
153
154 if format_dict or md_dict:
TypeError: 'NoneType' object is not iterable
|
AttributeError
|
def forModule(cls, name):
"""
Return an instance of this class representing the module of the given name. If the given
module name is "__main__", it will be translated to the actual file name of the top-level
script without the .py or .pyc extension. This method assumes that the module with the
specified name has already been loaded.
"""
module = sys.modules[name]
filePath = os.path.abspath(module.__file__)
filePath = filePath.split(os.path.sep)
filePath[-1], extension = os.path.splitext(filePath[-1])
require(
extension in (".py", ".pyc"),
"The name of a user script/module must end in .py or .pyc.",
)
log.debug("Module name is %s", name)
if name == "__main__":
log.debug("Discovering real name of module")
# User script/module was invoked as the main program
if module.__package__:
# Invoked as a module via python -m foo.bar
log.debug("Script was invoked as a module")
name = [filePath.pop()]
for package in reversed(module.__package__.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
name.append(dirPathTail)
name = ".".join(reversed(name))
dirPath = os.path.sep.join(filePath)
else:
# Invoked as a script via python foo/bar.py
name = filePath.pop()
dirPath = os.path.sep.join(filePath)
cls._check_conflict(dirPath, name)
else:
# User module was imported. Determine the directory containing the top-level package
for package in reversed(name.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
dirPath = os.path.sep.join(filePath)
log.debug("Module dir is %s", dirPath)
require(
os.path.isdir(dirPath),
"Bad directory path %s for module %s. Note that hot-deployment does not support \
.egg-link files yet, or scripts located in the root directory.",
dirPath,
name,
)
fromVirtualEnv = inVirtualEnv() and dirPath.startswith(sys.prefix)
return cls(dirPath=dirPath, name=name, fromVirtualEnv=fromVirtualEnv)
|
def forModule(cls, name):
"""
Return an instance of this class representing the module of the given name. If the given
module name is "__main__", it will be translated to the actual file name of the top-level
script without the .py or .pyc extension. This method assumes that the module with the
specified name has already been loaded.
"""
module = sys.modules[name]
filePath = os.path.abspath(module.__file__)
filePath = filePath.split(os.path.sep)
filePath[-1], extension = os.path.splitext(filePath[-1])
require(
extension in (".py", ".pyc"),
"The name of a user script/module must end in .py or .pyc.",
)
log.debug("Module name is %s", name)
if name == "__main__":
log.debug("Discovering real name of module")
# User script/module was invoked as the main program
if module.__package__:
# Invoked as a module via python -m foo.bar
log.debug("Script was invoked as a module")
name = [filePath.pop()]
for package in reversed(module.__package__.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
name.append(dirPathTail)
name = ".".join(reversed(name))
dirPath = os.path.sep.join(filePath)
else:
# Invoked as a script via python foo/bar.py
name = filePath.pop()
dirPath = os.path.sep.join(filePath)
cls._check_conflict(dirPath, name)
else:
# User module was imported. Determine the directory containing the top-level package
for package in reversed(name.split(".")):
dirPathTail = filePath.pop()
assert dirPathTail == package
dirPath = os.path.sep.join(filePath)
log.debug("Module dir is %s", dirPath)
assert os.path.isdir(dirPath)
fromVirtualEnv = inVirtualEnv() and dirPath.startswith(sys.prefix)
return cls(dirPath=dirPath, name=name, fromVirtualEnv=fromVirtualEnv)
|
https://github.com/DataBiosphere/toil/issues/1232
|
Traceback (most recent call last):
File "/root/toil-scripts/.env/bin/toil-bwa", line 11, in <module>
load_entry_point('toil-scripts==2.1.0a1', 'console_scripts', 'toil-bwa')()
File "build/bdist.linux-x86_64/egg/toil_scripts/bwa_alignment/bwa_alignment.py", line 292, in main
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 390, in wrapJobFn
return JobFunctionWrappingJob(fn, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 1246, in __init__
self.userFunctionModule = ModuleDescriptor.forModule(userFunction.__module__).globalize()
File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 381, in forModule
assert os.path.isdir(dirPath)
AssertionError
|
AssertionError
|
def destroy(self):
self._bind()
for name in "jobItems", "jobFileIDs", "files", "statsFiles", "statsFileIDs":
resource = getattr(self, name)
if resource is not None:
if isinstance(resource, AzureTable):
resource.delete_table()
elif isinstance(resource, AzureBlobContainer):
resource.delete_container()
else:
assert False
setattr(self, name, None)
|
def destroy(self):
for name in "jobItems", "jobFileIDs", "files", "statsFiles", "statsFileIDs":
resource = getattr(self, name)
if resource is not None:
if isinstance(resource, AzureTable):
resource.delete_table()
elif isinstance(resource, AzureBlobContainer):
resource.delete_container()
else:
assert False
setattr(self, name, None)
|
https://github.com/DataBiosphere/toil/issues/1125
|
anovak@amn2master1:~/hgvm-graph-bakeoff-evaluations$ toil clean azure:hgvm:amntree
amn2master1 2016-08-29 20:12:58,971 MainThread INFO toil.lib.bioio: Logging set at level: INFO
amn2master1 2016-08-29 20:12:58,971 MainThread INFO toil.utils.toilClean: Attempting to delete the job store
amn2master1 2016-08-29 20:12:59,034 MainThread INFO toil.utils.toilClean: Successfully deleted the job store
anovak@amn2master1:~/hgvm-graph-bakeoff-evaluations$ scripts/parallelMappingEvaluation.py azure:hgvm:amntree graph_urls.tsv azure:hgvm:pilot-input/reads azure:hgvm:pilot-output/low-coverage/aug16/out1 --batchSystem=mesos --mesosMaster 10.0.0.5:5050 --kmer_size=16 --edge_max=3 --index_mode gcsa-mem --include_primary --sample_limit 10 --disableCaching --workDir /tmp 2>&1 | tee log.txt
INFO:toil.lib.bioio:Logging set at level: INFO
INFO:toil.lib.bioio:Logging set at level: INFO
INFO:requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): hgvm.table.core.windows.net
Traceback (most recent call last):
File "scripts/parallelMappingEvaluation.py", line 1170, in <module>
sys.exit(main(sys.argv))
File "scripts/parallelMappingEvaluation.py", line 1160, in main
failed_jobs = Job.Runner.startToil(root_job, options)
File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 540, in startToil
with Toil(options) as toil:
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 501, in __enter__
jobStore.initialize(config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 152, in initialize
raise JobStoreExistsException(self.locator)
toil.jobStores.abstractJobStore.JobStoreExistsException: The job store 'hgvm:amntree' already exists. Use --restart to resume the workflow, or remove the job store with 'toil clean' to start the workflow from scratch
anovak@amn2master1:~/hgvm-graph-bakeoff-evaluations$
|
toil.jobStores.abstractJobStore.JobStoreExistsException
|
def _setupHotDeployment(self, userScript=None):
"""
Determine the user script, save it to the job store and inject a reference to the saved
copy into the batch system such that it can hot-deploy the resource on the worker
nodes.
:param toil.resource.ModuleDescriptor userScript: the module descriptor referencing the
user script. If None, it will be looked up in the job store.
"""
if userScript is not None:
# This branch is hit when a workflow is being started
if userScript.belongsToToil:
logger.info(
"User script %s belongs to Toil. No need to hot-deploy it.", userScript
)
userScript = None
else:
if self._batchSystem.supportsHotDeployment():
# Note that by saving the ModuleDescriptor, and not the Resource we allow for
# redeploying a potentially modified user script on workflow restarts.
with self._jobStore.writeSharedFileStream("userScript") as f:
cPickle.dump(userScript, f, protocol=cPickle.HIGHEST_PROTOCOL)
else:
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
if not isinstance(self._batchSystem, SingleMachineBatchSystem):
logger.warn(
"Batch system does not support hot-deployment. The user "
"script %s will have to be present at the same location on "
"every worker.",
userScript,
)
userScript = None
else:
# This branch is hit on restarts
from toil.jobStores.abstractJobStore import NoSuchFileException
try:
with self._jobStore.readSharedFileStream("userScript") as f:
userScript = cPickle.load(f)
except NoSuchFileException:
logger.info(
"User script neither set explicitly nor present in the job store."
)
userScript = None
if userScript is None:
logger.info("No user script to hot-deploy.")
else:
logger.debug("Saving user script %s as a resource", userScript)
userScriptResource = userScript.saveAsResourceTo(self._jobStore)
logger.debug("Injecting user script %s into batch system.", userScriptResource)
self._batchSystem.setUserScript(userScriptResource)
thread = Thread(
target=self._refreshUserScript,
name="refreshUserScript",
kwargs=dict(userScriptResource=userScriptResource),
)
thread.daemon = True
thread.start()
|
def _setupHotDeployment(self, userScript=None):
"""
Determine the user script, save it to the job store and inject a reference to the saved
copy into the batch system such that it can hot-deploy the resource on the worker
nodes.
:param toil.resource.ModuleDescriptor userScript: the module descriptor referencing the
user script. If None, it will be looked up in the job store.
"""
if userScript is not None:
# This branch is hit when a workflow is being started
if userScript.belongsToToil:
logger.info(
"User script %s belongs to Toil. No need to hot-deploy it.", userScript
)
userScript = None
else:
if self._batchSystem.supportsHotDeployment():
# Note that by saving the ModuleDescriptor, and not the Resource we allow for
# redeploying a potentially modified user script on workflow restarts.
with self._jobStore.writeSharedFileStream("userScript") as f:
cPickle.dump(userScript, f, protocol=cPickle.HIGHEST_PROTOCOL)
else:
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
if not isinstance(self._batchSystem, SingleMachineBatchSystem):
logger.warn(
"Batch system does not support hot-deployment. The user "
"script %s will have to be present at the same location on "
"every worker.",
userScript,
)
userScript = None
else:
# This branch is hit on restarts
from toil.jobStores.abstractJobStore import NoSuchFileException
try:
with self._jobStore.readSharedFileStream("userScript") as f:
userScript = cPickle.load(f)
except NoSuchFileException:
logger.info(
"User script neither set explicitly nor present in the job store."
)
userScript = None
if userScript is None:
logger.info("No user script to hot-deploy.")
else:
logger.info("Saving user script %s as a resource", userScript)
userScriptResource = userScript.saveAsResourceTo(self._jobStore)
logger.info("Hot-deploying user script resource %s.", userScriptResource)
self._batchSystem.setUserScript(userScriptResource)
|
https://github.com/DataBiosphere/toil/issues/668
|
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: Traceback (most recent call last):
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 937, in _loadJob
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 908, in _loadUserModule
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: userModule = userModule.localize()
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 416, in localize
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: resource.download(callback=stash)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 152, in download
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: self._save(tempDirPath)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 269, in _save
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: self._download(bytesIO)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 223, in _download
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: with closing(urlopen(self.url)) as content:
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 154, in urlopen
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: return opener.open(url, data, timeout)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 437, in open
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: response = meth(req, response)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 550, in http_response
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: 'http', request, response, code, msg, hdrs)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 475, in error
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: return self._call_chain(*args)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 409, in _call_chain
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: result = func(*args)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 558, in http_error_default
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: HTTPError: HTTP Error 400: Bad Request
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: Exiting the worker because of a failed jobWrapper on host ip-172-31-45-12
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host ip-172-31-45-12
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job e8f55793-15ca-471a-947f-38936f917bba to 2
|
HTTPError
|
def create(cls, jobStore, leaderPath):
"""
Saves the content of the file or directory at the given path to the given job store
and returns a resource object representing that content for the purpose of obtaining it
again at a generic, public URL. This method should be invoked on the leader node.
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
:param str leaderPath:
:rtype: Resource
"""
pathHash = cls._pathHash(leaderPath)
contentHash = hashlib.md5()
# noinspection PyProtectedMember
with cls._load(leaderPath) as src:
with jobStore.writeSharedFileStream(
sharedFileName=pathHash, isProtected=False
) as dst:
userScript = src.read()
contentHash.update(userScript)
dst.write(userScript)
return cls(
name=os.path.basename(leaderPath),
pathHash=pathHash,
url=jobStore.getSharedPublicUrl(sharedFileName=pathHash),
contentHash=contentHash.hexdigest(),
)
|
def create(cls, jobStore, leaderPath):
"""
Saves the content of the file or directory at the given path to the given job store
and returns a resource object representing that content for the purpose of obtaining it
again at a generic, public URL. This method should be invoked on the leader node.
:rtype: Resource
"""
pathHash = cls._pathHash(leaderPath)
contentHash = hashlib.md5()
# noinspection PyProtectedMember
with cls._load(leaderPath) as src:
with jobStore.writeSharedFileStream(
sharedFileName=pathHash, isProtected=False
) as dst:
userScript = src.read()
contentHash.update(userScript)
dst.write(userScript)
return cls(
name=os.path.basename(leaderPath),
pathHash=pathHash,
url=jobStore.getSharedPublicUrl(sharedFileName=pathHash),
contentHash=contentHash.hexdigest(),
)
|
https://github.com/DataBiosphere/toil/issues/668
|
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: Traceback (most recent call last):
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 266, in main
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 937, in _loadJob
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 908, in _loadUserModule
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: userModule = userModule.localize()
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 416, in localize
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: resource.download(callback=stash)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 152, in download
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: self._save(tempDirPath)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 269, in _save
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: self._download(bytesIO)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/local/lib/python2.7/dist-packages/toil/resource.py", line 223, in _download
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: with closing(urlopen(self.url)) as content:
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 154, in urlopen
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: return opener.open(url, data, timeout)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 437, in open
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: response = meth(req, response)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 550, in http_response
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: 'http', request, response, code, msg, hdrs)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 475, in error
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: return self._call_chain(*args)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 409, in _call_chain
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: result = func(*args)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: File "/usr/lib/python2.7/urllib2.py", line 558, in http_error_default
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: HTTPError: HTTP Error 400: Bad Request
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: Exiting the worker because of a failed jobWrapper on host ip-172-31-45-12
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host ip-172-31-45-12
WARNING:toil.leader:e8f55793-15ca-471a-947f-38936f917bba: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job e8f55793-15ca-471a-947f-38936f917bba to 2
|
HTTPError
|
def defer(self, function, *args, **kwargs):
"""
Register a deferred function, i.e. a callable that will be invoked after the current
attempt at running this job concludes. A job attempt is said to conclude when the job
function (or the :meth:`Job.run` method for class-based jobs) returns, raises an
exception or after the process running it terminates abnormally. A deferred function will
be called on the node that attempted to run the job, even if a subsequent attempt is made
on another node. A deferred function should be idempotent because it may be called
multiple times on the same node or even in the same process. More than one deferred
function may be registered per job attempt by calling this method repeatedly with
different arguments. If the same function is registered twice with the same or different
arguments, it will be called twice per job attempt.
Examples for deferred functions are ones that handle cleanup of resources external to
Toil, like Docker containers, files outside the work directory, etc.
:param callable function: The function to be called after this job concludes.
:param list args: The arguments to the function
:param dict kwargs: The keyword arguments to the function
"""
require(
self.fileStore is not None,
"A deferred function may only be registered with a "
"job while that job is running.",
)
self.fileStore._registerDeferredFunction(
DeferredFunction.create(function, *args, **kwargs)
)
|
def defer(self, callable, *args, **kwargs):
"""
Register a deferred function, i.e. a callable that will be invoked after the current attempt
at running this job concludes. A job attempt is said to conclude when the job function (or
the Job.run method for class-based jobs) returns, raises an exception or after the process
running it terminates abnormally. A deferred function will be called on the node that
attempted to run the job, even if a subsequent attempt is made on another node. A deferred
function should be idempotent because it may be called multiple times on the same node or
even in the same process. More than one deferred function may be registered per job attempt
by calling this method repeatedly with different arguments. If the same callable is
registered twice, it will be called twice per job attempt.
The functions one would typically provide here are cleanup functions that handle
Toil-external events upon a failure within Toil (killing Docker containers, etc).
:param function callable: The function to be run after this job.
:param list args: The arguments to the function
:param dict kwargs: The keyword arguments to the function
:return: None
"""
try:
getattr(self, "fileStore")
except AttributeError:
raise RuntimeError(
"A deferred function may only be registered from within the job it "
'is being registered with. "%s" was illegally registered.',
callable.__name__,
)
self.fileStore._registerDeferredFunction(callable, *args, **kwargs)
|
https://github.com/DataBiosphere/toil/issues/1308
|
---TOIL WORKER OUTPUT LOG---
f006bc8b-195a-45b8-8e35-2291a2985f14 Next available file descriptor: 8
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Next available file descriptor: 8
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Loaded job f006bc8b-195a-45b8-8e35-2291a2985f14
f006bc8b-195a-45b8-8e35-2291a2985f14 Parsed jobWrapper
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Parsed jobWrapper
f006bc8b-195a-45b8-8e35-2291a2985f14 Got a command to run: _toil d171ba6d-1842-4f3e-998a-84c3d11cee59 /usr/local/lib/python2.7/dist-packages toil.job False
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Got a command to run: _toil d171ba6d-1842-4f3e-998a-84c3d11cee59 /usr/local/lib/python2.7/dist-packages toil.job Fals
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/dist-packages', name='toil.job', fromVirtualEnv=F
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Reading FileInfo(fileID='d171ba6d-1842-4f3e-998a-84c3d11cee59', ownerID='96b4d6f2-1b56-48f5-b1c4-9f0d
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting Job from module toil.job.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting Config from module toil.common.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting defaultdict from module collections.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting list from module __builtin__.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting ModuleDescriptor from module toil.resource.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting set from module __builtin__.
f006bc8b-195a-45b8-8e35-2291a2985f14 INFO:toil.fileStore:Starting job (d171ba6d-1842-4f3e-998a-84c3d11cee59) with ID (d9a3bddaadfb045480c8964cb490dcba2493d8f9).
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.fileStore:CACHE: Obtained lock on file /var/lib/toil/toil-17daa335-7e62-4772-9308-53456ca4b46a/cache-17daa335-7e62-4772-9308
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.fileStore:CACHE: Released lock
f006bc8b-195a-45b8-8e35-2291a2985f14 Traceback (most recent call last):
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 323, in main
f006bc8b-195a-45b8-8e35-2291a2985f14 caching=not config.disableCaching)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 109, in createFileStore
f006bc8b-195a-45b8-8e35-2291a2985f14 return fileStoreCls(jobStore, jobWrapper, localTempDir, inputBlockFn)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 104, in __init__
f006bc8b-195a-45b8-8e35-2291a2985f14 self._setupCache()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 624, in _setupCache
f006bc8b-195a-45b8-8e35-2291a2985f14 with self._CacheState.open(self) as cacheInfo:
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
f006bc8b-195a-45b8-8e35-2291a2985f14 return self.gen.next()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 986, in open
f006bc8b-195a-45b8-8e35-2291a2985f14 cacheInfo = cls._load(outer.cacheStateFile)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 999, in _load
f006bc8b-195a-45b8-8e35-2291a2985f14 cacheInfoDict = dill.load(fH)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/dill/dill.py", line 250, in load
f006bc8b-195a-45b8-8e35-2291a2985f14 obj = pik.load()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 864, in load
f006bc8b-195a-45b8-8e35-2291a2985f14 dispatch[key](self)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 1096, in load_global
f006bc8b-195a-45b8-8e35-2291a2985f14 klass = self.find_class(module, name)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/dill/dill.py", line 406, in find_class
f006bc8b-195a-45b8-8e35-2291a2985f14 return StockUnpickler.find_class(self, module, name)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 1130, in find_class
f006bc8b-195a-45b8-8e35-2291a2985f14 __import__(module)
f006bc8b-195a-45b8-8e35-2291a2985f14 ImportError: No module named toil_lib.programs
f006bc8b-195a-45b8-8e35-2291a2985f14 Exiting the worker because of a failed jobWrapper on host ip-172-31-3-245
f006bc8b-195a-45b8-8e35-2291a2985f14 ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host ip-172-31-3-245
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Loaded job f006bc8b-195a-45b8-8e35-2291a2985f14
f006bc8b-195a-45b8-8e35-2291a2985f14 WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job f006bc8b-195a-45b8-8e35-2291a2985f14 to 0
f006bc8b-195a-45b8-8e35-2291a2985f14 WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
|
ImportError
|
def _loadUserModule(cls, userModule):
"""
Imports and returns the module object represented by the given module descriptor.
:type userModule: ModuleDescriptor
"""
return userModule.load()
|
def _loadUserModule(cls, userModule):
"""
Imports and returns the module object represented by the given module descriptor.
:type userModule: ModuleDescriptor
"""
if not userModule.belongsToToil:
userModule = userModule.localize()
if userModule.dirPath not in sys.path:
sys.path.append(userModule.dirPath)
try:
return importlib.import_module(userModule.name)
except ImportError:
logger.error(
"Failed to import user module %r from sys.path=%r", userModule, sys.path
)
raise
|
https://github.com/DataBiosphere/toil/issues/1308
|
---TOIL WORKER OUTPUT LOG---
f006bc8b-195a-45b8-8e35-2291a2985f14 Next available file descriptor: 8
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Next available file descriptor: 8
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Loaded job f006bc8b-195a-45b8-8e35-2291a2985f14
f006bc8b-195a-45b8-8e35-2291a2985f14 Parsed jobWrapper
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Parsed jobWrapper
f006bc8b-195a-45b8-8e35-2291a2985f14 Got a command to run: _toil d171ba6d-1842-4f3e-998a-84c3d11cee59 /usr/local/lib/python2.7/dist-packages toil.job False
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Got a command to run: _toil d171ba6d-1842-4f3e-998a-84c3d11cee59 /usr/local/lib/python2.7/dist-packages toil.job Fals
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/dist-packages', name='toil.job', fromVirtualEnv=F
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Reading FileInfo(fileID='d171ba6d-1842-4f3e-998a-84c3d11cee59', ownerID='96b4d6f2-1b56-48f5-b1c4-9f0d
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting Job from module toil.job.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting Config from module toil.common.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting defaultdict from module collections.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting list from module __builtin__.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting ModuleDescriptor from module toil.resource.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting set from module __builtin__.
f006bc8b-195a-45b8-8e35-2291a2985f14 INFO:toil.fileStore:Starting job (d171ba6d-1842-4f3e-998a-84c3d11cee59) with ID (d9a3bddaadfb045480c8964cb490dcba2493d8f9).
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.fileStore:CACHE: Obtained lock on file /var/lib/toil/toil-17daa335-7e62-4772-9308-53456ca4b46a/cache-17daa335-7e62-4772-9308
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.fileStore:CACHE: Released lock
f006bc8b-195a-45b8-8e35-2291a2985f14 Traceback (most recent call last):
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 323, in main
f006bc8b-195a-45b8-8e35-2291a2985f14 caching=not config.disableCaching)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 109, in createFileStore
f006bc8b-195a-45b8-8e35-2291a2985f14 return fileStoreCls(jobStore, jobWrapper, localTempDir, inputBlockFn)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 104, in __init__
f006bc8b-195a-45b8-8e35-2291a2985f14 self._setupCache()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 624, in _setupCache
f006bc8b-195a-45b8-8e35-2291a2985f14 with self._CacheState.open(self) as cacheInfo:
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
f006bc8b-195a-45b8-8e35-2291a2985f14 return self.gen.next()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 986, in open
f006bc8b-195a-45b8-8e35-2291a2985f14 cacheInfo = cls._load(outer.cacheStateFile)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 999, in _load
f006bc8b-195a-45b8-8e35-2291a2985f14 cacheInfoDict = dill.load(fH)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/dill/dill.py", line 250, in load
f006bc8b-195a-45b8-8e35-2291a2985f14 obj = pik.load()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 864, in load
f006bc8b-195a-45b8-8e35-2291a2985f14 dispatch[key](self)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 1096, in load_global
f006bc8b-195a-45b8-8e35-2291a2985f14 klass = self.find_class(module, name)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/dill/dill.py", line 406, in find_class
f006bc8b-195a-45b8-8e35-2291a2985f14 return StockUnpickler.find_class(self, module, name)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 1130, in find_class
f006bc8b-195a-45b8-8e35-2291a2985f14 __import__(module)
f006bc8b-195a-45b8-8e35-2291a2985f14 ImportError: No module named toil_lib.programs
f006bc8b-195a-45b8-8e35-2291a2985f14 Exiting the worker because of a failed jobWrapper on host ip-172-31-3-245
f006bc8b-195a-45b8-8e35-2291a2985f14 ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host ip-172-31-3-245
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Loaded job f006bc8b-195a-45b8-8e35-2291a2985f14
f006bc8b-195a-45b8-8e35-2291a2985f14 WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job f006bc8b-195a-45b8-8e35-2291a2985f14 to 0
f006bc8b-195a-45b8-8e35-2291a2985f14 WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
|
ImportError
|
def _jobStoreClasses(self):
"""
A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore]
"""
jobStoreClassNames = (
"toil.jobStores.azureJobStore.AzureJobStore",
"toil.jobStores.fileJobStore.FileJobStore",
"toil.jobStores.googleJobStore.GoogleJobStore",
"toil.jobStores.aws.jobStore.AWSJobStore",
"toil.jobStores.abstractJobStore.JobStoreSupport",
)
jobStoreClasses = []
for className in jobStoreClassNames:
moduleName, className = className.rsplit(".", 1)
from importlib import import_module
try:
module = import_module(moduleName)
except ImportError:
logger.debug(
"Unable to import '%s' as is expected if the corresponding extra was "
"omitted at installation time.",
moduleName,
)
else:
jobStoreClass = getattr(module, className)
jobStoreClasses.append(jobStoreClass)
return jobStoreClasses
|
def _jobStoreClasses(self):
"""
A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore]
"""
jobStoreClassNames = (
"toil.jobStores.azureJobStore.AzureJobStore",
"toil.jobStores.fileJobStore.FileJobStore",
"toil.jobStores.googleJobStore.GoogleJobStore",
"toil.jobStores.aws.jobStore.AWSJobStore",
"toil.jobStores.abstractJobStore.JobStoreSupport",
)
jobStoreClasses = []
for className in jobStoreClassNames:
moduleName, className = className.rsplit(".", 1)
from importlib import import_module
try:
module = import_module(moduleName)
except ImportError:
logger.info(
"Unable to import '%s'. You may want to try reinstalling Toil with "
"additional extras.",
moduleName,
)
else:
jobStoreClass = getattr(module, className)
jobStoreClasses.append(jobStoreClass)
return jobStoreClasses
|
https://github.com/DataBiosphere/toil/issues/1308
|
---TOIL WORKER OUTPUT LOG---
f006bc8b-195a-45b8-8e35-2291a2985f14 Next available file descriptor: 8
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Next available file descriptor: 8
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Loaded job f006bc8b-195a-45b8-8e35-2291a2985f14
f006bc8b-195a-45b8-8e35-2291a2985f14 Parsed jobWrapper
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Parsed jobWrapper
f006bc8b-195a-45b8-8e35-2291a2985f14 Got a command to run: _toil d171ba6d-1842-4f3e-998a-84c3d11cee59 /usr/local/lib/python2.7/dist-packages toil.job False
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.worker:Got a command to run: _toil d171ba6d-1842-4f3e-998a-84c3d11cee59 /usr/local/lib/python2.7/dist-packages toil.job Fals
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/usr/local/lib/python2.7/dist-packages', name='toil.job', fromVirtualEnv=F
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Reading FileInfo(fileID='d171ba6d-1842-4f3e-998a-84c3d11cee59', ownerID='96b4d6f2-1b56-48f5-b1c4-9f0d
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting Job from module toil.job.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting Config from module toil.common.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting defaultdict from module collections.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting list from module __builtin__.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting ModuleDescriptor from module toil.resource.
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.job:Getting set from module __builtin__.
f006bc8b-195a-45b8-8e35-2291a2985f14 INFO:toil.fileStore:Starting job (d171ba6d-1842-4f3e-998a-84c3d11cee59) with ID (d9a3bddaadfb045480c8964cb490dcba2493d8f9).
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.fileStore:CACHE: Obtained lock on file /var/lib/toil/toil-17daa335-7e62-4772-9308-53456ca4b46a/cache-17daa335-7e62-4772-9308
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.fileStore:CACHE: Released lock
f006bc8b-195a-45b8-8e35-2291a2985f14 Traceback (most recent call last):
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 323, in main
f006bc8b-195a-45b8-8e35-2291a2985f14 caching=not config.disableCaching)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 109, in createFileStore
f006bc8b-195a-45b8-8e35-2291a2985f14 return fileStoreCls(jobStore, jobWrapper, localTempDir, inputBlockFn)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 104, in __init__
f006bc8b-195a-45b8-8e35-2291a2985f14 self._setupCache()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 624, in _setupCache
f006bc8b-195a-45b8-8e35-2291a2985f14 with self._CacheState.open(self) as cacheInfo:
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
f006bc8b-195a-45b8-8e35-2291a2985f14 return self.gen.next()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 986, in open
f006bc8b-195a-45b8-8e35-2291a2985f14 cacheInfo = cls._load(outer.cacheStateFile)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/toil/fileStore.py", line 999, in _load
f006bc8b-195a-45b8-8e35-2291a2985f14 cacheInfoDict = dill.load(fH)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/dill/dill.py", line 250, in load
f006bc8b-195a-45b8-8e35-2291a2985f14 obj = pik.load()
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 864, in load
f006bc8b-195a-45b8-8e35-2291a2985f14 dispatch[key](self)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 1096, in load_global
f006bc8b-195a-45b8-8e35-2291a2985f14 klass = self.find_class(module, name)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/local/lib/python2.7/dist-packages/dill/dill.py", line 406, in find_class
f006bc8b-195a-45b8-8e35-2291a2985f14 return StockUnpickler.find_class(self, module, name)
f006bc8b-195a-45b8-8e35-2291a2985f14 File "/usr/lib/python2.7/pickle.py", line 1130, in find_class
f006bc8b-195a-45b8-8e35-2291a2985f14 __import__(module)
f006bc8b-195a-45b8-8e35-2291a2985f14 ImportError: No module named toil_lib.programs
f006bc8b-195a-45b8-8e35-2291a2985f14 Exiting the worker because of a failed jobWrapper on host ip-172-31-3-245
f006bc8b-195a-45b8-8e35-2291a2985f14 ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host ip-172-31-3-245
f006bc8b-195a-45b8-8e35-2291a2985f14 DEBUG:toil.jobStores.aws.jobStore:Loaded job f006bc8b-195a-45b8-8e35-2291a2985f14
f006bc8b-195a-45b8-8e35-2291a2985f14 WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job f006bc8b-195a-45b8-8e35-2291a2985f14 to 0
f006bc8b-195a-45b8-8e35-2291a2985f14 WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
|
ImportError
|
def processTotallyFailedJob(self, jobWrapper):
"""
Processes a totally failed job.
"""
# Mark job as a totally failed job
self.toilState.totalFailedJobs.add(jobWrapper.jobStoreID)
if jobWrapper.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Is
# a service job
logger.debug(
"Service job is being processed as a totally failed job: %s"
% jobWrapper.jobStoreID
)
predecessorJobWrapper = self.toilState.serviceJobStoreIDToPredecessorJob[
jobWrapper.jobStoreID
]
# This removes the service job as a service of the predecessor
# and potentially makes the predecessor active
self._updatePredecessorStatus(jobWrapper.jobStoreID)
# Remove the start flag, if it still exists. This indicates
# to the service manager that the job has "started", this prevents
# the service manager from deadlocking while waiting
self.jobStore.deleteFile(jobWrapper.startJobStoreID)
# Signal to any other services in the group that they should
# terminate. We do this to prevent other services in the set
# of services from deadlocking waiting for this service to start properly
if predecessorJobWrapper.jobStoreID in self.toilState.servicesIssued:
self.serviceManager.killServices(
self.toilState.servicesIssued[predecessorJobWrapper.jobStoreID],
error=True,
)
logger.debug(
"Job: %s is instructing all the services of its parent job to quit",
jobWrapper.jobStoreID,
)
self.toilState.hasFailedSuccessors.add(
predecessorJobWrapper.jobStoreID
) # This ensures that the
# job will not attempt to run any of it's successors on the stack
else:
# Is a non-service job
assert jobWrapper.jobStoreID not in self.toilState.servicesIssued
# Traverse failed job's successor graph and get the jobStoreID of new successors.
# Any successor already in toilState.failedSuccessors will not be traversed
# All successors traversed will be added to toilState.failedSuccessors and returned
# as a set (unseenSuccessors).
unseenSuccessors = self.getSuccessors(
jobWrapper, self.toilState.failedSuccessors, self.jobStore
)
logger.debug(
"Found new failed successors: %s of job: %s"
% (" ".join(unseenSuccessors), jobWrapper.jobStoreID)
)
# For each newly found successor
for successorJobStoreID in unseenSuccessors:
# If the successor is a successor of other jobs that have already tried to schedule it
if (
successorJobStoreID
in self.toilState.successorJobStoreIDToPredecessorJobs
):
# For each such predecessor job
# (we remove the successor from toilState.successorJobStoreIDToPredecessorJobs to avoid doing
# this multiple times for each failed predecessor)
for (
predecessorJob
) in self.toilState.successorJobStoreIDToPredecessorJobs.pop(
successorJobStoreID
):
# Reduce the predecessor job's successor count.
self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
# Indicate that it has failed jobs.
self.toilState.hasFailedSuccessors.add(predecessorJob.jobStoreID)
logger.debug(
"Marking job: %s as having failed successors (found by reading successors failed job)"
% predecessorJob.jobStoreID
)
# If the predecessor has no remaining successors, add to list of active jobs
assert (
self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0
)
if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0:
self.toilState.updatedJobs.add((predecessorJob, 0))
# Remove the predecessor job from the set of jobs with successors.
self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
# If the job has predecessor(s)
if jobWrapper.jobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs:
# For each predecessor of the job
for (
predecessorJobWrapper
) in self.toilState.successorJobStoreIDToPredecessorJobs[
jobWrapper.jobStoreID
]:
# Mark the predecessor as failed
self.toilState.hasFailedSuccessors.add(predecessorJobWrapper.jobStoreID)
logger.debug(
"Totally failed job: %s is marking direct predecessor: %s as having failed jobs",
jobWrapper.jobStoreID,
predecessorJobWrapper.jobStoreID,
)
self._updatePredecessorStatus(jobWrapper.jobStoreID)
|
def processTotallyFailedJob(self, jobWrapper):
"""
Processes a totally failed job.
"""
# Mark job as a totally failed job
self.toilState.totalFailedJobs.add(jobWrapper.jobStoreID)
if jobWrapper.jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob: # Is
# a service job
logger.debug(
"Service job is being processed as a totally failed job: %s"
% jobWrapper.jobStoreID
)
predecessorJobWrapper = self.toilState.serviceJobStoreIDToPredecessorJob[
jobWrapper.jobStoreID
]
# This removes the service job as a service of the predecessor
# and potentially makes the predecessor active
self._updatePredecessorStatus(jobWrapper.jobStoreID)
# Remove the start flag, if it still exists. This indicates
# to the service manager that the job has "started", this prevents
# the service manager from deadlocking while waiting
self.jobStore.deleteFile(jobWrapper.startJobStoreID)
# Signal to any other services in the group that they should
# terminate. We do this to prevent other services in the set
# of services from deadlocking waiting for this service to start properly
if predecessorJobWrapper.jobStoreID in self.toilState.servicesIssued:
self.serviceManager.killServices(
self.toilState.servicesIssued[predecessorJobWrapper.jobStoreID],
error=True,
)
logger.debug(
"Job: %s is instructing all the services of its parent job to quit",
jobWrapper.jobStoreID,
)
self.toilState.hasFailedSuccessors.add(
predecessorJobWrapper.jobStoreID
) # This ensures that the
# job will not attempt to run any of it's successors on the stack
else:
assert jobWrapper.jobStoreID not in self.toilState.servicesIssued
# Is a non-service job, walk up the tree killing services of any jobs with nothing left to do.
if jobWrapper.jobStoreID in self.toilState.successorJobStoreIDToPredecessorJobs:
for (
predecessorJobWrapper
) in self.toilState.successorJobStoreIDToPredecessorJobs[
jobWrapper.jobStoreID
]:
self.toilState.hasFailedSuccessors.add(predecessorJobWrapper.jobStoreID)
logger.debug(
"Totally failed job: %s is marking direct predecessors %s as having failed jobs",
jobWrapper.jobStoreID,
predecessorJobWrapper.jobStoreID,
)
self._updatePredecessorStatus(jobWrapper.jobStoreID)
|
https://github.com/DataBiosphere/toil/issues/1122
|
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: The jobWrapper seems to have left a log file, indicating failure: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: Reporting file: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: ---TOIL WORKER OUTPUT LOG---
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:rdflib:RDFLib Version: 4.2.1
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:CACHE: Read file with ID 'a/2/tmp2RYy7z.tmp' from the cache.
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:cwltool:[job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: 'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:cwltool:'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:[job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: Final process status is permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:Final process status is permanentFail
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:LOG-TO-MASTER: Successfully deleted local copies of file with ID 'a/2/tmp2RYy7z.tmp'.
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: Traceback (most recent call last):
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/worker.py", line 331, in main
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1085, in _runner
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: returnValues = self._run(jobWrapper, fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1033, in _run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: return self.run(fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 256, in run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: **opts)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/cwltool/main.py", line 218, in single_job_executor
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: raise WorkflowException(u"Process status is %s" % (final_status))
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WorkflowException: Process status is ['permanentFail']
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job B/U/jobWBMw6I to 0
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: Job: B/U/jobWBMw6I is completely failed
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: No jobs left to run so exiting.
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Finished the main loop
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Waiting for stats and logging collator thread to finish ...
kunkel 2016-08-26 11:18:31,638 MainThread INFO toil.leader: ... finished collating stats and logs. Took 0.321724891663 seconds
kunkel 2016-08-26 11:18:31,639 MainThread INFO toil.leader: Waiting for service manager thread to finish ...
kunkel 2016-08-26 11:18:32,129 MainThread INFO toil.leader: ... finished shutting down the service manager. Took 0.490026950836 seconds
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.5.0a1', 'console_scripts', 'cwltoil')()
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 665, in main
outobj = toil.start(wf1)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 580, in start
return self._runMainLoop(job)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 794, in _runMainLoop
jobCache=self._jobCache)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 692, in mainLoop
innerLoop(jobStore, config, batchSystem, toilState, jobBatcher, serviceManager, statsAndLogging)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 970, in innerLoop
assert toilState.successorCounts == {}
AssertionError
|
AssertionError
|
def _updatePredecessorStatus(self, jobStoreID):
"""
Update status of predecessors for finished successor job.
"""
if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
# Is a service job
predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(
jobStoreID
)
self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID)
if (
len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0
): # Predecessor job has
# all its services terminated
self.toilState.servicesIssued.pop(
predecessorJob.jobStoreID
) # The job has no running services
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
# the job is done we can add it to the list of updated job files
logger.debug(
"Job %s services have completed or totally failed, adding to updated jobs"
% predecessorJob.jobStoreID
)
elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs:
# We have reach the root job
assert len(self.toilState.updatedJobs) == 0
assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0
assert len(self.toilState.successorCounts) == 0
logger.debug("Reached root job %s so no predecessors to clean up" % jobStoreID)
else:
# Is a non-root, non-service job
logger.debug("Cleaning the predecessors of %s" % jobStoreID)
# For each predecessor
for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(
jobStoreID
):
# Reduce the predecessor's number of successors by one to indicate the
# completion of the jobStoreID job
self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
# If the predecessor job is done and all the successors are complete
if self.toilState.successorCounts[predecessorJob.jobStoreID] == 0:
# Remove it from the set of jobs with active successors
self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
# Pop stack at this point, as we can get rid of its successors
predecessorJob.stack.pop()
# Now we know the job is done we can add it to the list of updated job files
assert predecessorJob not in self.toilState.updatedJobs
self.toilState.updatedJobs.add((predecessorJob, 0))
logger.debug(
"Job %s has all its non-service successors completed or totally "
"failed",
predecessorJob.jobStoreID,
)
|
def _updatePredecessorStatus(self, jobStoreID):
"""
Update status of a predecessor for finished successor job.
"""
if jobStoreID in self.toilState.serviceJobStoreIDToPredecessorJob:
# Is a service job
predecessorJob = self.toilState.serviceJobStoreIDToPredecessorJob.pop(
jobStoreID
)
self.toilState.servicesIssued[predecessorJob.jobStoreID].pop(jobStoreID)
if (
len(self.toilState.servicesIssued[predecessorJob.jobStoreID]) == 0
): # Predecessor job has
# all its services terminated
self.toilState.servicesIssued.pop(
predecessorJob.jobStoreID
) # The job has no running services
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
# the job is done we can add it to the list of updated job files
logger.debug(
"Job %s services have completed or totally failed, adding to updated jobs"
% predecessorJob.jobStoreID
)
elif jobStoreID not in self.toilState.successorJobStoreIDToPredecessorJobs:
# We have reach the root job
assert len(self.toilState.updatedJobs) == 0
assert len(self.toilState.successorJobStoreIDToPredecessorJobs) == 0
assert len(self.toilState.successorCounts) == 0
else:
for predecessorJob in self.toilState.successorJobStoreIDToPredecessorJobs.pop(
jobStoreID
):
self.toilState.successorCounts[predecessorJob.jobStoreID] -= 1
assert self.toilState.successorCounts[predecessorJob.jobStoreID] >= 0
if (
self.toilState.successorCounts[predecessorJob.jobStoreID] == 0
): # Job is done
self.toilState.successorCounts.pop(predecessorJob.jobStoreID)
predecessorJob.stack.pop()
logger.debug(
"Job %s has all its non-service successors completed or totally "
"failed",
predecessorJob.jobStoreID,
)
assert predecessorJob not in self.toilState.updatedJobs
self.toilState.updatedJobs.add((predecessorJob, 0)) # Now we know
|
https://github.com/DataBiosphere/toil/issues/1122
|
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: The jobWrapper seems to have left a log file, indicating failure: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: Reporting file: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: ---TOIL WORKER OUTPUT LOG---
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:rdflib:RDFLib Version: 4.2.1
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:CACHE: Read file with ID 'a/2/tmp2RYy7z.tmp' from the cache.
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:cwltool:[job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: 'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:cwltool:'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:[job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: Final process status is permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:Final process status is permanentFail
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:LOG-TO-MASTER: Successfully deleted local copies of file with ID 'a/2/tmp2RYy7z.tmp'.
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: Traceback (most recent call last):
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/worker.py", line 331, in main
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1085, in _runner
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: returnValues = self._run(jobWrapper, fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1033, in _run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: return self.run(fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 256, in run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: **opts)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/cwltool/main.py", line 218, in single_job_executor
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: raise WorkflowException(u"Process status is %s" % (final_status))
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WorkflowException: Process status is ['permanentFail']
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job B/U/jobWBMw6I to 0
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: Job: B/U/jobWBMw6I is completely failed
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: No jobs left to run so exiting.
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Finished the main loop
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Waiting for stats and logging collator thread to finish ...
kunkel 2016-08-26 11:18:31,638 MainThread INFO toil.leader: ... finished collating stats and logs. Took 0.321724891663 seconds
kunkel 2016-08-26 11:18:31,639 MainThread INFO toil.leader: Waiting for service manager thread to finish ...
kunkel 2016-08-26 11:18:32,129 MainThread INFO toil.leader: ... finished shutting down the service manager. Took 0.490026950836 seconds
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.5.0a1', 'console_scripts', 'cwltoil')()
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 665, in main
outobj = toil.start(wf1)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 580, in start
return self._runMainLoop(job)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 794, in _runMainLoop
jobCache=self._jobCache)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 692, in mainLoop
innerLoop(jobStore, config, batchSystem, toilState, jobBatcher, serviceManager, statsAndLogging)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 970, in innerLoop
assert toilState.successorCounts == {}
AssertionError
|
AssertionError
|
def __init__(self, jobStore, rootJob, jobCache=None):
# This is a hash of jobs, referenced by jobStoreID, to their predecessor jobs.
self.successorJobStoreIDToPredecessorJobs = {}
# Hash of jobStoreIDs to counts of numbers of successors issued.
# There are no entries for jobs
# without successors in this map.
self.successorCounts = {}
# This is a hash of service jobs, referenced by jobStoreID, to their predecessor job
self.serviceJobStoreIDToPredecessorJob = {}
# Hash of jobStoreIDs to maps of services issued for the job
# Each for job, the map is a dictionary of service jobStoreIDs
# to the flags used to communicate the with service
self.servicesIssued = {}
# Jobs that are ready to be processed
self.updatedJobs = set()
# The set of totally failed jobs - this needs to be filtered at the
# end to remove jobs that were removed by checkpoints
self.totalFailedJobs = set()
# Jobs (as jobStoreIDs) with successors that have totally failed
self.hasFailedSuccessors = set()
# The set of successors of failed jobs as a set of jobStoreIds
self.failedSuccessors = set()
# Set of jobs that have multiple predecessors that have one or more predecessors
# finished, but not all of them. This acts as a cache for these jobs.
# Stored as hash from jobStoreIDs to jobWrappers
self.jobsToBeScheduledWithMultiplePredecessors = {}
##Algorithm to build this information
logger.info("(Re)building internal scheduler state")
self._buildToilState(rootJob, jobStore, jobCache)
|
def __init__(self, jobStore, rootJob, jobCache=None):
# This is a hash of jobs, referenced by jobStoreID, to their predecessor jobs.
self.successorJobStoreIDToPredecessorJobs = {}
# Hash of jobStoreIDs to counts of numbers of successors issued.
# There are no entries for jobs
# without successors in this map.
self.successorCounts = {}
# This is a hash of service jobs, referenced by jobStoreID, to their predecessor job
self.serviceJobStoreIDToPredecessorJob = {}
# Hash of jobStoreIDs to maps of services issued for the job
# Each for job, the map is a dictionary of service jobStoreIDs
# to the flags used to communicate the with service
self.servicesIssued = {}
# Jobs (as jobStoreIDs) with successors that have totally failed
self.hasFailedSuccessors = set()
# Jobs that are ready to be processed
self.updatedJobs = set()
# The set of totally failed jobs - this needs to be filtered at the
# end to remove jobs that were removed by checkpoints
self.totalFailedJobs = set()
##Algorithm to build this information
logger.info("(Re)building internal scheduler state")
self._buildToilState(rootJob, jobStore, jobCache)
|
https://github.com/DataBiosphere/toil/issues/1122
|
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: The jobWrapper seems to have left a log file, indicating failure: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: Reporting file: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: ---TOIL WORKER OUTPUT LOG---
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:rdflib:RDFLib Version: 4.2.1
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:CACHE: Read file with ID 'a/2/tmp2RYy7z.tmp' from the cache.
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:cwltool:[job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: 'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:cwltool:'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:[job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: Final process status is permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:Final process status is permanentFail
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:LOG-TO-MASTER: Successfully deleted local copies of file with ID 'a/2/tmp2RYy7z.tmp'.
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: Traceback (most recent call last):
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/worker.py", line 331, in main
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1085, in _runner
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: returnValues = self._run(jobWrapper, fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1033, in _run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: return self.run(fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 256, in run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: **opts)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/cwltool/main.py", line 218, in single_job_executor
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: raise WorkflowException(u"Process status is %s" % (final_status))
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WorkflowException: Process status is ['permanentFail']
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job B/U/jobWBMw6I to 0
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: Job: B/U/jobWBMw6I is completely failed
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: No jobs left to run so exiting.
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Finished the main loop
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Waiting for stats and logging collator thread to finish ...
kunkel 2016-08-26 11:18:31,638 MainThread INFO toil.leader: ... finished collating stats and logs. Took 0.321724891663 seconds
kunkel 2016-08-26 11:18:31,639 MainThread INFO toil.leader: Waiting for service manager thread to finish ...
kunkel 2016-08-26 11:18:32,129 MainThread INFO toil.leader: ... finished shutting down the service manager. Took 0.490026950836 seconds
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.5.0a1', 'console_scripts', 'cwltoil')()
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 665, in main
outobj = toil.start(wf1)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 580, in start
return self._runMainLoop(job)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 794, in _runMainLoop
jobCache=self._jobCache)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 692, in mainLoop
innerLoop(jobStore, config, batchSystem, toilState, jobBatcher, serviceManager, statsAndLogging)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 970, in innerLoop
assert toilState.successorCounts == {}
AssertionError
|
AssertionError
|
def _buildToilState(self, jobWrapper, jobStore, jobCache=None):
"""
Traverses tree of jobs from the root jobWrapper (rootJob) building the
ToilState class.
If jobCache is passed, it must be a dict from job ID to JobWrapper
object. Jobs will be loaded from the cache (which can be downloaded from
the jobStore in a batch) instead of piecemeal when recursed into.
"""
def getJob(jobId):
if jobCache is not None:
try:
return jobCache[jobId]
except ValueError:
return jobStore.load(jobId)
else:
return jobStore.load(jobId)
# If the jobWrapper has a command, is a checkpoint, has services or is ready to be
# deleted it is ready to be processed
if (
jobWrapper.command is not None
or jobWrapper.checkpoint is not None
or len(jobWrapper.services) > 0
or len(jobWrapper.stack) == 0
):
logger.debug(
"Found job to run: %s, with command: %s, with checkpoint: %s, "
"with services: %s, with stack: %s",
jobWrapper.jobStoreID,
jobWrapper.command is not None,
jobWrapper.checkpoint is not None,
len(jobWrapper.services) > 0,
len(jobWrapper.stack) == 0,
)
self.updatedJobs.add((jobWrapper, 0))
if jobWrapper.checkpoint is not None:
jobWrapper.command = jobWrapper.checkpoint
else: # There exist successors
logger.debug(
"Adding job: %s to the state with %s successors"
% (jobWrapper.jobStoreID, len(jobWrapper.stack[-1]))
)
# Record the number of successors
self.successorCounts[jobWrapper.jobStoreID] = len(jobWrapper.stack[-1])
def processSuccessorWithMultiplePredecessors(successorJobWrapper):
# If jobWrapper job is not reported as complete by the successor
if jobWrapper.jobStoreID not in successorJobWrapper.predecessorsFinished:
# Update the sucessor's status to mark the predecessor complete
successorJobWrapper.predecessorsFinished.add(jobWrapper.jobStoreID)
# If the successor has no predecessors to finish
assert (
len(successorJobWrapper.predecessorsFinished)
<= successorJobWrapper.predecessorNumber
)
if (
len(successorJobWrapper.predecessorsFinished)
== successorJobWrapper.predecessorNumber
):
# It is ready to be run, so remove it from the cache
self.jobsToBeScheduledWithMultiplePredecessors.pop(successorJobStoreID)
# Recursively consider the successor
self._buildToilState(successorJobWrapper, jobStore, jobCache=jobCache)
# For each successor
for successorJobStoreTuple in jobWrapper.stack[-1]:
successorJobStoreID = successorJobStoreTuple[0]
# If the successor jobWrapper does not yet point back at a
# predecessor we have not yet considered it
if successorJobStoreID not in self.successorJobStoreIDToPredecessorJobs:
# Add the job as a predecessor
self.successorJobStoreIDToPredecessorJobs[successorJobStoreID] = [
jobWrapper
]
# If predecessorJobStoreID is not None then the successor has multiple predecessors
predecessorJobStoreID = successorJobStoreTuple[-1]
if predecessorJobStoreID != None:
# We load the successor job
successorJobWrapper = getJob(successorJobStoreID)
# We put the successor job in the cache of successor jobs with multiple predecessors
assert (
successorJobStoreID
not in self.jobsToBeScheduledWithMultiplePredecessors
)
self.jobsToBeScheduledWithMultiplePredecessors[
successorJobStoreID
] = successorJobWrapper
# Process successor
processSuccessorWithMultiplePredecessors(successorJobWrapper)
else:
# The successor has only the jobWrapper job as a predecessor so
# recursively consider the successor
self._buildToilState(
getJob(successorJobStoreID), jobStore, jobCache=jobCache
)
else:
# We've already seen the successor
# Add the job as a predecessor
assert (
jobWrapper
not in self.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
]
)
self.successorJobStoreIDToPredecessorJobs[successorJobStoreID].append(
jobWrapper
)
# If the successor has multiple predecessors
if (
successorJobStoreID
in self.jobsToBeScheduledWithMultiplePredecessors
):
# Get the successor from cache
successorJobWrapper = (
self.jobsToBeScheduledWithMultiplePredecessors[
successorJobStoreID
]
)
# Process successor
processSuccessorWithMultiplePredecessors(successorJobWrapper)
|
def _buildToilState(self, jobWrapper, jobStore, jobCache=None):
"""
Traverses tree of jobs from the root jobWrapper (rootJob) building the
ToilState class.
If jobCache is passed, it must be a dict from job ID to JobWrapper
object. Jobs will be loaded from the cache (which can be downloaded from
the jobStore in a batch) instead of piecemeal when recursed into.
"""
def getJob(jobId):
if jobCache is not None:
try:
return jobCache[jobId]
except ValueError:
return jobStore.load(jobId)
else:
return jobStore.load(jobId)
# If the jobWrapper has a command, is a checkpoint, has services or is ready to be
# deleted it is ready to be processed
if (
jobWrapper.command is not None
or jobWrapper.checkpoint is not None
or len(jobWrapper.services) > 0
or len(jobWrapper.stack) == 0
):
logger.debug(
"Found job to run: %s, with command: %s, with checkpoint: %s, "
"with services: %s, with stack: %s",
jobWrapper.jobStoreID,
jobWrapper.command is not None,
jobWrapper.checkpoint is not None,
len(jobWrapper.services) > 0,
len(jobWrapper.stack) == 0,
)
self.updatedJobs.add((jobWrapper, 0))
if jobWrapper.checkpoint is not None:
jobWrapper.command = jobWrapper.checkpoint
else: # There exist successors
self.successorCounts[jobWrapper.jobStoreID] = len(jobWrapper.stack[-1])
for successorJobStoreTuple in jobWrapper.stack[-1]:
successorJobStoreID = successorJobStoreTuple[0]
if successorJobStoreID not in self.successorJobStoreIDToPredecessorJobs:
# Given that the successor jobWrapper does not yet point back at a
# predecessor we have not yet considered it, so we call the function
# on the successor
self.successorJobStoreIDToPredecessorJobs[successorJobStoreID] = [
jobWrapper
]
self._buildToilState(
getJob(successorJobStoreID), jobStore, jobCache=jobCache
)
else:
# We have already looked at the successor, so we don't recurse,
# but we add back a predecessor link
self.successorJobStoreIDToPredecessorJobs[successorJobStoreID].append(
jobWrapper
)
|
https://github.com/DataBiosphere/toil/issues/1122
|
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: The jobWrapper seems to have left a log file, indicating failure: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: Reporting file: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: ---TOIL WORKER OUTPUT LOG---
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:rdflib:RDFLib Version: 4.2.1
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:CACHE: Read file with ID 'a/2/tmp2RYy7z.tmp' from the cache.
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:cwltool:[job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: 'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:cwltool:'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:[job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: Final process status is permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:Final process status is permanentFail
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:LOG-TO-MASTER: Successfully deleted local copies of file with ID 'a/2/tmp2RYy7z.tmp'.
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: Traceback (most recent call last):
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/worker.py", line 331, in main
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1085, in _runner
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: returnValues = self._run(jobWrapper, fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1033, in _run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: return self.run(fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 256, in run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: **opts)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/cwltool/main.py", line 218, in single_job_executor
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: raise WorkflowException(u"Process status is %s" % (final_status))
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WorkflowException: Process status is ['permanentFail']
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job B/U/jobWBMw6I to 0
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: Job: B/U/jobWBMw6I is completely failed
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: No jobs left to run so exiting.
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Finished the main loop
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Waiting for stats and logging collator thread to finish ...
kunkel 2016-08-26 11:18:31,638 MainThread INFO toil.leader: ... finished collating stats and logs. Took 0.321724891663 seconds
kunkel 2016-08-26 11:18:31,639 MainThread INFO toil.leader: Waiting for service manager thread to finish ...
kunkel 2016-08-26 11:18:32,129 MainThread INFO toil.leader: ... finished shutting down the service manager. Took 0.490026950836 seconds
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.5.0a1', 'console_scripts', 'cwltoil')()
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 665, in main
outobj = toil.start(wf1)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 580, in start
return self._runMainLoop(job)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 794, in _runMainLoop
jobCache=self._jobCache)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 692, in mainLoop
innerLoop(jobStore, config, batchSystem, toilState, jobBatcher, serviceManager, statsAndLogging)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 970, in innerLoop
assert toilState.successorCounts == {}
AssertionError
|
AssertionError
|
def innerLoop(
jobStore,
config,
batchSystem,
toilState,
jobBatcher,
serviceManager,
statsAndLogging,
):
"""
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
:param toil.common.Config config:
:param toil.batchSystems.abstractBatchSystem.AbstractBatchSystem batchSystem:
:param ToilState toilState:
:param JobBatcher jobBatcher:
:param ServiceManager serviceManager:
:param StatsAndLogging statsAndLogging:
"""
# Putting this in separate function for easier reading
# Sets up the timing of the jobWrapper rescuing method
timeSinceJobsLastRescued = time.time()
logger.info("Starting the main loop")
while True:
# Process jobs that are ready to be scheduled/have successors to schedule
if len(toilState.updatedJobs) > 0:
logger.debug(
"Built the jobs list, currently have %i jobs to update and %i jobs issued",
len(toilState.updatedJobs),
jobBatcher.getNumberOfJobsIssued(),
)
updatedJobs = toilState.updatedJobs # The updated jobs to consider below
toilState.updatedJobs = set() # Resetting the list for the next set
for jobWrapper, resultStatus in updatedJobs:
logger.debug(
"Updating status of job: %s with result status: %s",
jobWrapper.jobStoreID,
resultStatus,
)
# This stops a job with services being issued by the serviceManager from
# being considered further in this loop. This catch is necessary because
# the job's service's can fail while being issued, causing the job to be
# added to updated jobs.
if jobWrapper in serviceManager.jobWrappersWithServicesBeingStarted:
logger.debug(
"Got a job to update which is still owned by the service "
"manager: %s",
jobWrapper.jobStoreID,
)
continue
# If some of the jobs successors failed then either fail the job
# or restart it if it has retries left and is a checkpoint job
if jobWrapper.jobStoreID in toilState.hasFailedSuccessors:
# If the job has services running, signal for them to be killed
# once they are killed then the jobWrapper will be re-added to the
# updatedJobs set and then scheduled to be removed
if jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its services due to successor failure",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=True
)
# If the job has non-service jobs running wait for them to finish
# the job will be re-added to the updated jobs when these jobs are done
elif jobWrapper.jobStoreID in toilState.successorCounts:
logger.debug(
"Job: %s with failed successors still has successor jobs running",
jobWrapper.jobStoreID,
)
continue
# If the job is a checkpoint and has remaining retries then reissue it.
elif (
jobWrapper.checkpoint is not None
and jobWrapper.remainingRetryCount > 0
):
logger.warn(
"Job: %s is being restarted as a checkpoint after the total "
"failure of jobs in its subtree.",
jobWrapper.jobStoreID,
)
jobBatcher.issueJob(
jobWrapper.jobStoreID,
memory=jobWrapper.memory,
cores=jobWrapper.cores,
disk=jobWrapper.disk,
preemptable=jobWrapper.preemptable,
)
else: # Mark it totally failed
logger.debug(
"Job %s is being processed as completely failed",
jobWrapper.jobStoreID,
)
jobBatcher.processTotallyFailedJob(jobWrapper)
# If the jobWrapper has a command it must be run before any successors.
# Similarly, if the job previously failed we rerun it, even if it doesn't have a
# command to run, to eliminate any parts of the stack now completed.
elif jobWrapper.command is not None or resultStatus != 0:
isServiceJob = (
jobWrapper.jobStoreID
in toilState.serviceJobStoreIDToPredecessorJob
)
# If the job has run out of retries or is a service job whose error flag has
# been indicated, fail the job.
if (
jobWrapper.remainingRetryCount == 0
or isServiceJob
and not jobStore.fileExists(jobWrapper.errorJobStoreID)
):
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is completely failed", jobWrapper.jobStoreID
)
else:
# Otherwise try the job again
jobBatcher.issueJob(
jobWrapper.jobStoreID,
jobWrapper.memory,
jobWrapper.cores,
jobWrapper.disk,
jobWrapper.preemptable,
)
# If the job has services to run, which have not been started, start them
elif len(jobWrapper.services) > 0:
# Build a map from the service jobs to the job and a map
# of the services created for the job
assert jobWrapper.jobStoreID not in toilState.servicesIssued
toilState.servicesIssued[jobWrapper.jobStoreID] = {}
for serviceJobList in jobWrapper.services:
for serviceTuple in serviceJobList:
serviceID = serviceTuple[0]
assert (
serviceID
not in toilState.serviceJobStoreIDToPredecessorJob
)
toilState.serviceJobStoreIDToPredecessorJob[serviceID] = (
jobWrapper
)
toilState.servicesIssued[jobWrapper.jobStoreID][
serviceID
] = serviceTuple[4:7]
# Use the service manager to start the services
serviceManager.scheduleServices(jobWrapper)
logger.debug(
"Giving job: %s to service manager to schedule its jobs",
jobWrapper.jobStoreID,
)
# There exist successors to run
elif len(jobWrapper.stack) > 0:
assert len(jobWrapper.stack[-1]) > 0
logger.debug(
"Job: %s has %i successors to schedule",
jobWrapper.jobStoreID,
len(jobWrapper.stack[-1]),
)
# Record the number of successors that must be completed before
# the jobWrapper can be considered again
assert jobWrapper.jobStoreID not in toilState.successorCounts
toilState.successorCounts[jobWrapper.jobStoreID] = len(
jobWrapper.stack[-1]
)
# List of successors to schedule
successors = []
# For each successor schedule if all predecessors have been completed
for (
successorJobStoreID,
memory,
cores,
disk,
preemptable,
predecessorID,
) in jobWrapper.stack[-1]:
# Build map from successor to predecessors.
if (
successorJobStoreID
not in toilState.successorJobStoreIDToPredecessorJobs
):
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
] = []
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].append(jobWrapper)
# Case that the jobWrapper has multiple predecessors
if predecessorID is not None:
logger.debug(
"Successor job: %s of job: %s has multiple predecessors"
% (successorJobStoreID, jobWrapper.jobStoreID)
)
# Get the successor job, using a cache
# (if the successor job has already been seen it will be in this cache,
# but otherwise put it in the cache)
if (
successorJobStoreID
not in toilState.jobsToBeScheduledWithMultiplePredecessors
):
toilState.jobsToBeScheduledWithMultiplePredecessors[
successorJobStoreID
] = jobStore.load(successorJobStoreID)
successorJobWrapper = (
toilState.jobsToBeScheduledWithMultiplePredecessors[
successorJobStoreID
]
)
# Add the jobWrapper job as a finished predecessor to the successor
successorJobWrapper.predecessorsFinished.add(predecessorID)
# If the successor is in the set of successors of failed jobs
if successorJobStoreID in toilState.failedSuccessors:
logger.debug(
"Successor job: %s of job: %s has failed predecessors"
% (successorJobStoreID, jobWrapper.jobStoreID)
)
# Add the job to the set having failed successors
toilState.hasFailedSuccessors.add(jobWrapper.jobStoreID)
# Reduce active successor count and remove the successor as an active successor of the job
toilState.successorCounts[jobWrapper.jobStoreID] -= 1
assert (
toilState.successorCounts[jobWrapper.jobStoreID]
>= 0
)
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].remove(jobWrapper)
if (
len(
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
]
)
== 0
):
toilState.successorJobStoreIDToPredecessorJobs.pop(
successorJobStoreID
)
# If the job now has no active successors add to active jobs
# so it can be processed as a job with failed successors
if (
toilState.successorCounts[jobWrapper.jobStoreID]
== 0
):
logger.debug(
"Job: %s has no successors to run and some are failed, adding to list of jobs with failed successors"
% jobWrapper.jobStoreID
)
toilState.successorCounts.pop(jobWrapper.jobStoreID)
toilState.updatedJobs.add((jobWrapper, 0))
continue
# If the successor job's predecessors have all not all completed then
# ignore the jobWrapper as is not yet ready to run
assert (
len(successorJobWrapper.predecessorsFinished)
<= successorJobWrapper.predecessorNumber
)
if (
len(successorJobWrapper.predecessorsFinished)
< successorJobWrapper.predecessorNumber
):
continue
else:
# Remove the successor job from the cache
toilState.jobsToBeScheduledWithMultiplePredecessors.pop(
successorJobStoreID
)
# Add successor to list of successors to schedule
successors.append(
(successorJobStoreID, memory, cores, disk, preemptable)
)
jobBatcher.issueJobs(successors)
elif jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its services due to the successful completion of its successor jobs",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=False
)
# There are no remaining tasks to schedule within the jobWrapper, but
# we schedule it anyway to allow it to be deleted.
# TODO: An alternative would be simple delete it here and add it to the
# list of jobs to process, or (better) to create an asynchronous
# process that deletes jobs and then feeds them back into the set
# of jobs to be processed
else:
# Remove the job
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
memory=config.defaultMemory,
cores=config.defaultCores,
disk=config.defaultDisk,
# We allow this cleanup to potentially occur on a
# preemptable instance.
preemptable=True,
)
logger.debug(
"Job: %s is empty, we are scheduling to clean it up",
jobWrapper.jobStoreID,
)
else:
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is empty but completely failed - something is very wrong",
jobWrapper.jobStoreID,
)
# The exit criterion
if (
len(toilState.updatedJobs) == 0
and jobBatcher.getNumberOfJobsIssued() == 0
and serviceManager.serviceJobsIssuedToServiceManager == 0
):
logger.info("No jobs left to run so exiting.")
break
# Start any service jobs available from the service manager
while True:
serviceJobTuple = serviceManager.getServiceJobsToStart(0)
# Stop trying to get jobs when function returns None
if serviceJobTuple is None:
break
serviceJobStoreID, memory, cores, disk = serviceJobTuple
logger.debug("Launching service job: %s", serviceJobStoreID)
# This loop issues the jobs to the batch system because the batch system is not
# thread-safe. FIXME: don't understand this comment
jobBatcher.issueJob(serviceJobStoreID, memory, cores, disk, False)
# Get jobs whose services have started
while True:
jobWrapper = serviceManager.getJobWrapperWhoseServicesAreRunning(0)
if jobWrapper is None: # Stop trying to get jobs when function returns None
break
logger.debug("Job: %s has established its services.", jobWrapper.jobStoreID)
jobWrapper.services = []
toilState.updatedJobs.add((jobWrapper, 0))
# Gather any new, updated jobWrapper from the batch system
updatedJob = batchSystem.getUpdatedBatchJob(2)
if updatedJob is not None:
jobBatchSystemID, result, wallTime = updatedJob
if jobBatcher.hasJob(jobBatchSystemID):
if result == 0:
logger.debug(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s ended successfully",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
)
else:
logger.warn(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s failed with exit value %i",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
result,
)
jobBatcher.processFinishedJob(
jobBatchSystemID, result, wallTime=wallTime
)
else:
logger.warn(
"A result seems to already have been processed "
"for jobWrapper with batch system ID: %i",
jobBatchSystemID,
)
else:
# Process jobs that have gone awry
# In the case that there is nothing happening
# (no updated jobWrapper to gather for 10 seconds)
# check if their are any jobs that have run too long
# (see JobBatcher.reissueOverLongJobs) or which
# have gone missing from the batch system (see JobBatcher.reissueMissingJobs)
if (
time.time() - timeSinceJobsLastRescued >= config.rescueJobsFrequency
): # We only
# rescue jobs every N seconds, and when we have
# apparently exhausted the current jobWrapper supply
jobBatcher.reissueOverLongJobs()
logger.info("Reissued any over long jobs")
hasNoMissingJobs = jobBatcher.reissueMissingJobs()
if hasNoMissingJobs:
timeSinceJobsLastRescued = time.time()
else:
timeSinceJobsLastRescued += 60 # This means we'll try again
# in a minute, providing things are quiet
logger.info("Rescued any (long) missing jobs")
# Check on the associated threads and exit if a failure is detected
statsAndLogging.check()
serviceManager.check()
# the cluster scaler object will only be instantiated if autoscaling is enabled
if jobBatcher.clusterScaler is not None:
jobBatcher.clusterScaler.check()
logger.info("Finished the main loop")
# Consistency check the toil state
assert toilState.updatedJobs == set()
assert toilState.successorCounts == {}
assert toilState.successorJobStoreIDToPredecessorJobs == {}
assert toilState.serviceJobStoreIDToPredecessorJob == {}
assert toilState.servicesIssued == {}
|
def innerLoop(
jobStore,
config,
batchSystem,
toilState,
jobBatcher,
serviceManager,
statsAndLogging,
):
"""
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore:
:param toil.common.Config config:
:param toil.batchSystems.abstractBatchSystem.AbstractBatchSystem batchSystem:
:param ToilState toilState:
:param JobBatcher jobBatcher:
:param ServiceManager serviceManager:
:param StatsAndLogging statsAndLogging:
"""
# Putting this in separate function for easier reading
# Sets up the timing of the jobWrapper rescuing method
timeSinceJobsLastRescued = time.time()
logger.info("Starting the main loop")
while True:
# Process jobs that are ready to be scheduled/have successors to schedule
if len(toilState.updatedJobs) > 0:
logger.debug(
"Built the jobs list, currently have %i jobs to update and %i jobs issued",
len(toilState.updatedJobs),
jobBatcher.getNumberOfJobsIssued(),
)
updatedJobs = toilState.updatedJobs # The updated jobs to consider below
toilState.updatedJobs = set() # Resetting the list for the next set
for jobWrapper, resultStatus in updatedJobs:
logger.debug(
"Updating status of job: %s with result status: %s",
jobWrapper.jobStoreID,
resultStatus,
)
# This stops a job with services being issued by the serviceManager from
# being considered further in this loop. This catch is necessary because
# the job's service's can fail while being issued, causing the job to be
# added to updated jobs.
if jobWrapper in serviceManager.jobWrappersWithServicesBeingStarted:
logger.debug(
"Got a job to update which is still owned by the service "
"manager: %s",
jobWrapper.jobStoreID,
)
continue
# If some of the jobs successors failed then either fail the job
# or restart it if it has retries left and is a checkpoint job
if jobWrapper.jobStoreID in toilState.hasFailedSuccessors:
# If the job has services running, signal for them to be killed
# once they are killed then the jobWrapper will be re-added to the
# updatedJobs set and then scheduled to be removed
if jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its services due to successor failure",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=True
)
# If the job has non-service jobs running wait for them to finish
# the job will be re-added to the updated jobs when these jobs are done
elif jobWrapper.jobStoreID in toilState.successorCounts:
logger.debug(
"Job: %s with failed successors still has successor jobs running",
jobWrapper.jobStoreID,
)
continue
# If the job is a checkpoint and has remaining retries then reissue it.
elif (
jobWrapper.checkpoint is not None
and jobWrapper.remainingRetryCount > 0
):
logger.warn(
"Job: %s is being restarted as a checkpoint after the total "
"failure of jobs in its subtree.",
jobWrapper.jobStoreID,
)
jobBatcher.issueJob(
jobWrapper.jobStoreID,
memory=jobWrapper.memory,
cores=jobWrapper.cores,
disk=jobWrapper.disk,
preemptable=jobWrapper.preemptable,
)
else: # Mark it totally failed
logger.debug(
"Job %s is being processed as completely failed",
jobWrapper.jobStoreID,
)
jobBatcher.processTotallyFailedJob(jobWrapper)
# If the jobWrapper has a command it must be run before any successors.
# Similarly, if the job previously failed we rerun it, even if it doesn't have a
# command to run, to eliminate any parts of the stack now completed.
elif jobWrapper.command is not None or resultStatus != 0:
isServiceJob = (
jobWrapper.jobStoreID
in toilState.serviceJobStoreIDToPredecessorJob
)
# If the job has run out of retries or is a service job whose error flag has
# been indicated, fail the job.
if (
jobWrapper.remainingRetryCount == 0
or isServiceJob
and not jobStore.fileExists(jobWrapper.errorJobStoreID)
):
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is completely failed", jobWrapper.jobStoreID
)
else:
# Otherwise try the job again
jobBatcher.issueJob(
jobWrapper.jobStoreID,
jobWrapper.memory,
jobWrapper.cores,
jobWrapper.disk,
jobWrapper.preemptable,
)
# If the job has services to run, which have not been started, start them
elif len(jobWrapper.services) > 0:
# Build a map from the service jobs to the job and a map
# of the services created for the job
assert jobWrapper.jobStoreID not in toilState.servicesIssued
toilState.servicesIssued[jobWrapper.jobStoreID] = {}
for serviceJobList in jobWrapper.services:
for serviceTuple in serviceJobList:
serviceID = serviceTuple[0]
assert (
serviceID
not in toilState.serviceJobStoreIDToPredecessorJob
)
toilState.serviceJobStoreIDToPredecessorJob[serviceID] = (
jobWrapper
)
toilState.servicesIssued[jobWrapper.jobStoreID][
serviceID
] = serviceTuple[4:7]
# Use the service manager to start the services
serviceManager.scheduleServices(jobWrapper)
logger.debug(
"Giving job: %s to service manager to schedule its jobs",
jobWrapper.jobStoreID,
)
# There exist successors to run
elif len(jobWrapper.stack) > 0:
assert len(jobWrapper.stack[-1]) > 0
logger.debug(
"Job: %s has %i successors to schedule",
jobWrapper.jobStoreID,
len(jobWrapper.stack[-1]),
)
# Record the number of successors that must be completed before
# the jobWrapper can be considered again
assert jobWrapper.jobStoreID not in toilState.successorCounts
toilState.successorCounts[jobWrapper.jobStoreID] = len(
jobWrapper.stack[-1]
)
# List of successors to schedule
successors = []
# For each successor schedule if all predecessors have been completed
for (
successorJobStoreID,
memory,
cores,
disk,
preemptable,
predecessorID,
) in jobWrapper.stack[-1]:
# Build map from successor to predecessors.
if (
successorJobStoreID
not in toilState.successorJobStoreIDToPredecessorJobs
):
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
] = []
toilState.successorJobStoreIDToPredecessorJobs[
successorJobStoreID
].append(jobWrapper)
# Case that the jobWrapper has multiple predecessors
if predecessorID is not None:
# Load the wrapped jobWrapper
job2 = jobStore.load(successorJobStoreID)
# Remove the predecessor from the list of predecessors
job2.predecessorsFinished.add(predecessorID)
# Checkpoint
jobStore.update(job2)
# If the jobs predecessors have all not all completed then
# ignore the jobWrapper
assert len(job2.predecessorsFinished) >= 1
assert (
len(job2.predecessorsFinished) <= job2.predecessorNumber
)
if len(job2.predecessorsFinished) < job2.predecessorNumber:
continue
successors.append(
(successorJobStoreID, memory, cores, disk, preemptable)
)
jobBatcher.issueJobs(successors)
elif jobWrapper.jobStoreID in toilState.servicesIssued:
logger.debug(
"Telling job: %s to terminate its due to the successful completion of its successor jobs",
jobWrapper.jobStoreID,
)
serviceManager.killServices(
toilState.servicesIssued[jobWrapper.jobStoreID], error=False
)
# There are no remaining tasks to schedule within the jobWrapper, but
# we schedule it anyway to allow it to be deleted.
# TODO: An alternative would be simple delete it here and add it to the
# list of jobs to process, or (better) to create an asynchronous
# process that deletes jobs and then feeds them back into the set
# of jobs to be processed
else:
# Remove the job
if jobWrapper.remainingRetryCount > 0:
jobBatcher.issueJob(
jobWrapper.jobStoreID,
memory=config.defaultMemory,
cores=config.defaultCores,
disk=config.defaultDisk,
# We allow this cleanup to potentially occur on a
# preemptable instance.
preemptable=True,
)
logger.debug(
"Job: %s is empty, we are scheduling to clean it up",
jobWrapper.jobStoreID,
)
else:
jobBatcher.processTotallyFailedJob(jobWrapper)
logger.warn(
"Job: %s is empty but completely failed - something is very wrong",
jobWrapper.jobStoreID,
)
# The exit criterion
if (
len(toilState.updatedJobs) == 0
and jobBatcher.getNumberOfJobsIssued() == 0
and serviceManager.serviceJobsIssuedToServiceManager == 0
):
logger.info("No jobs left to run so exiting.")
break
# Start any service jobs available from the service manager
while True:
serviceJobTuple = serviceManager.getServiceJobsToStart(0)
# Stop trying to get jobs when function returns None
if serviceJobTuple is None:
break
serviceJobStoreID, memory, cores, disk = serviceJobTuple
logger.debug("Launching service job: %s", serviceJobStoreID)
# This loop issues the jobs to the batch system because the batch system is not
# thread-safe. FIXME: don't understand this comment
jobBatcher.issueJob(serviceJobStoreID, memory, cores, disk, False)
# Get jobs whose services have started
while True:
jobWrapper = serviceManager.getJobWrapperWhoseServicesAreRunning(0)
if jobWrapper is None: # Stop trying to get jobs when function returns None
break
logger.debug("Job: %s has established its services.", jobWrapper.jobStoreID)
jobWrapper.services = []
toilState.updatedJobs.add((jobWrapper, 0))
# Gather any new, updated jobWrapper from the batch system
updatedJob = batchSystem.getUpdatedBatchJob(2)
if updatedJob is not None:
jobBatchSystemID, result, wallTime = updatedJob
if jobBatcher.hasJob(jobBatchSystemID):
if result == 0:
logger.debug(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s ended successfully",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
)
else:
logger.warn(
"Batch system is reporting that the jobWrapper with "
"batch system ID: %s and jobWrapper store ID: %s failed with exit value %i",
jobBatchSystemID,
jobBatcher.getJob(jobBatchSystemID),
result,
)
jobBatcher.processFinishedJob(
jobBatchSystemID, result, wallTime=wallTime
)
else:
logger.warn(
"A result seems to already have been processed "
"for jobWrapper with batch system ID: %i",
jobBatchSystemID,
)
else:
# Process jobs that have gone awry
# In the case that there is nothing happening
# (no updated jobWrapper to gather for 10 seconds)
# check if their are any jobs that have run too long
# (see JobBatcher.reissueOverLongJobs) or which
# have gone missing from the batch system (see JobBatcher.reissueMissingJobs)
if (
time.time() - timeSinceJobsLastRescued >= config.rescueJobsFrequency
): # We only
# rescue jobs every N seconds, and when we have
# apparently exhausted the current jobWrapper supply
jobBatcher.reissueOverLongJobs()
logger.info("Reissued any over long jobs")
hasNoMissingJobs = jobBatcher.reissueMissingJobs()
if hasNoMissingJobs:
timeSinceJobsLastRescued = time.time()
else:
timeSinceJobsLastRescued += 60 # This means we'll try again
# in a minute, providing things are quiet
logger.info("Rescued any (long) missing jobs")
# Check on the associated threads and exit if a failure is detected
statsAndLogging.check()
serviceManager.check()
# the cluster scaler object will only be instantiated if autoscaling is enabled
if jobBatcher.clusterScaler is not None:
jobBatcher.clusterScaler.check()
logger.info("Finished the main loop")
# Consistency check the toil state
assert toilState.updatedJobs == set()
assert toilState.successorCounts == {}
assert toilState.successorJobStoreIDToPredecessorJobs == {}
assert toilState.serviceJobStoreIDToPredecessorJob == {}
assert toilState.servicesIssued == {}
|
https://github.com/DataBiosphere/toil/issues/1122
|
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: The jobWrapper seems to have left a log file, indicating failure: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: Reporting file: B/U/jobWBMw6I
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: ---TOIL WORKER OUTPUT LOG---
kunkel 2016-08-26 11:18:31,313 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:rdflib:RDFLib Version: 4.2.1
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:CACHE: Read file with ID 'a/2/tmp2RYy7z.tmp' from the cache.
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:cwltool:[job revtool.cwl] /tmp/user/1001/tmpZ7hLZ6$ rev-fail \
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: /tmp/user/1001/tmppbMf7F/stg5c0b758e-c348-411b-b260-877b9491c6da/whale.txt > /tmp/user/1001/tmpZ7hLZ6/output.txt
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: 'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:cwltool:'rev-fail' not found
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: [job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:[job revtool.cwl] completed permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: Final process status is permanentFail
kunkel 2016-08-26 11:18:31,314 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:cwltool:Final process status is permanentFail
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: INFO:toil.fileStore:LOG-TO-MASTER: Successfully deleted local copies of file with ID 'a/2/tmp2RYy7z.tmp'.
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: Traceback (most recent call last):
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/worker.py", line 331, in main
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1085, in _runner
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: returnValues = self._run(jobWrapper, fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/job.py", line 1033, in _run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: return self.run(fileStore)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 256, in run
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: **opts)
kunkel 2016-08-26 11:18:31,315 MainThread WARNING toil.leader: B/U/jobWBMw6I: File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/cwltool/main.py", line 218, in single_job_executor
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: raise WorkflowException(u"Process status is %s" % (final_status))
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WorkflowException: Process status is ['permanentFail']
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host kunkel
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job B/U/jobWBMw6I to 0
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: B/U/jobWBMw6I: WARNING:toil.jobWrapper:We have increased the default memory of the failed job to 2147483648 bytes
kunkel 2016-08-26 11:18:31,316 MainThread WARNING toil.leader: Job: B/U/jobWBMw6I is completely failed
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: No jobs left to run so exiting.
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Finished the main loop
kunkel 2016-08-26 11:18:31,316 MainThread INFO toil.leader: Waiting for stats and logging collator thread to finish ...
kunkel 2016-08-26 11:18:31,638 MainThread INFO toil.leader: ... finished collating stats and logs. Took 0.321724891663 seconds
kunkel 2016-08-26 11:18:31,639 MainThread INFO toil.leader: Waiting for service manager thread to finish ...
kunkel 2016-08-26 11:18:32,129 MainThread INFO toil.leader: ... finished shutting down the service manager. Took 0.490026950836 seconds
Traceback (most recent call last):
File "/usr/local/bin/cwltoil", line 9, in <module>
load_entry_point('toil==3.5.0a1', 'console_scripts', 'cwltoil')()
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/cwl/cwltoil.py", line 665, in main
outobj = toil.start(wf1)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 580, in start
return self._runMainLoop(job)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/common.py", line 794, in _runMainLoop
jobCache=self._jobCache)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 692, in mainLoop
innerLoop(jobStore, config, batchSystem, toilState, jobBatcher, serviceManager, statsAndLogging)
File "/home/chapmanb/install/bcbio-vm/anaconda/lib/python2.7/site-packages/toil/leader.py", line 970, in innerLoop
assert toilState.successorCounts == {}
AssertionError
|
AssertionError
|
def distVersion():
"""
The distribution version identifying a published release on PyPI.
"""
from pkg_resources import parse_version
build_number = buildNumber()
parsedBaseVersion = parse_version(baseVersion)
if isinstance(parsedBaseVersion, tuple):
raise RuntimeError(
"Setuptools version 8.0 or newer required. Update by running "
"'pip install setuptools --upgrade'"
)
if build_number is not None and parsedBaseVersion.is_prerelease:
return baseVersion + ".dev" + build_number
else:
return baseVersion
|
def distVersion():
"""
The distribution version identifying a published release on PyPI.
"""
from pkg_resources import parse_version
build_number = buildNumber()
if build_number is not None and parse_version(baseVersion).is_prerelease:
return baseVersion + ".dev" + build_number
else:
return baseVersion
|
https://github.com/DataBiosphere/toil/issues/1323
|
Traceback (most recent call last):
File "version_template.py", line 137, in <module>
_main()
File "version_template.py", line 133, in _main
sys.stdout.write(expand_(*sys.argv[1:]))
File "version_template.py", line 128, in expand_
return resolve(name)
File "version_template.py", line 122, in resolve
v = v()
File "version_template.py", line 61, in distVersion
if build_number is not None and parse_version(baseVersion).is_prerelease:
AttributeError: 'tuple' object has no attribute 'is_prerelease'
|
AttributeError
|
def supportsHotDeployment(cls):
"""
Whether this batch system supports hot deployment of the user script itself. If it does,
the :meth:`setUserScript` can be invoked to set the resource object representing the user
script.
Note to implementors: If your implementation returns True here, it should also override
:rtype: bool
"""
raise NotImplementedError()
|
def supportsHotDeployment(cls):
"""
Whether this batch system supports hot deployment of the user script itself. If it does,
the __init__ method will have to accept an optional parameter in addition to the declared
ones: userScript. It will be an instance of toil.common.HotDeployedResource that
represents the user script.
:rtype: bool
"""
raise NotImplementedError()
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def start(self, rootJob):
"""
Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value
"""
self._assertContextManagerUsed()
if self.config.restart:
raise ToilRestartException(
"A Toil workflow can only be started once. Use Toil.restart() to resume it."
)
self._batchSystem = self.createBatchSystem(self.config)
self._setupHotDeployment(rootJob.getUserScript())
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
# Make a file to store the root job's return value in
rootJobReturnValueID = self._jobStore.getEmptyFileStoreID()
# Add the root job return value as a promise
rootJob._rvs[()].append(rootJobReturnValueID)
# Write the name of the promise file in a shared file
with self._jobStore.writeSharedFileStream("rootJobReturnValue") as fH:
fH.write(rootJobReturnValueID)
# Setup the first wrapper and cache it
job = rootJob._serialiseFirstJob(self._jobStore)
self._cacheJob(job)
self._setProvisioner()
return self._runMainLoop(job)
finally:
self._shutdownBatchSystem()
|
def start(self, rootJob):
"""
Invoke a Toil workflow with the given job as the root for an initial run. This method
must be called in the body of a ``with Toil(...) as toil:`` statement. This method should
not be called more than once for a workflow that has not finished.
:param toil.job.Job rootJob: The root job of the workflow
:return: The root job's return value
"""
self._assertContextManagerUsed()
if self.config.restart:
raise ToilRestartException(
"A Toil workflow can only be started once. Use Toil.restart() to resume it."
)
self._batchSystem = self.createBatchSystem(
self.config, jobStore=self._jobStore, userScript=rootJob.getUserScript()
)
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
# Make a file to store the root job's return value in
rootJobReturnValueID = self._jobStore.getEmptyFileStoreID()
# Add the root job return value as a promise
rootJob._rvs[()].append(rootJobReturnValueID)
# Write the name of the promise file in a shared file
with self._jobStore.writeSharedFileStream("rootJobReturnValue") as fH:
fH.write(rootJobReturnValueID)
# Setup the first wrapper and cache it
job = rootJob._serialiseFirstJob(self._jobStore)
self._cacheJob(job)
self._setProvisioner()
return self._runMainLoop(job)
finally:
self._shutdownBatchSystem()
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def restart(self):
"""
Restarts a workflow that has been interrupted. This method should be called if and only
if a workflow has previously been started and has not finished.
:return: The root job's return value
"""
self._assertContextManagerUsed()
if not self.config.restart:
raise ToilRestartException(
"A Toil workflow must be initiated with Toil.start(), not restart()."
)
self._batchSystem = self.createBatchSystem(self.config)
self._setupHotDeployment()
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
self._setProvisioner()
rootJob = self._jobStore.clean(jobCache=self._jobCache)
return self._runMainLoop(rootJob)
finally:
self._shutdownBatchSystem()
|
def restart(self):
"""
Restarts a workflow that has been interrupted. This method should be called if and only
if a workflow has previously been started and has not finished.
:return: The root job's return value
"""
self._assertContextManagerUsed()
if not self.config.restart:
raise ToilRestartException(
"A Toil workflow must be initiated with Toil.start(), not restart()."
)
self._batchSystem = self.createBatchSystem(self.config, jobStore=self._jobStore)
try:
self._setBatchSystemEnvVars()
self._serialiseEnv()
self._cacheAllJobs()
self._setProvisioner()
rootJob = self._jobStore.clean(jobCache=self._jobCache)
return self._runMainLoop(rootJob)
finally:
self._shutdownBatchSystem()
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def createBatchSystem(config):
"""
Creates an instance of the batch system specified in the given config.
:param toil.common.Config config: the current configuration
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
:return: an instance of a concrete subclass of AbstractBatchSystem
"""
kwargs = dict(
config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk,
)
if config.batchSystem == "parasol":
from toil.batchSystems.parasol import ParasolBatchSystem
batchSystemClass = ParasolBatchSystem
elif (
config.batchSystem == "single_machine" or config.batchSystem == "singleMachine"
):
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
batchSystemClass = SingleMachineBatchSystem
elif config.batchSystem == "gridengine" or config.batchSystem == "gridEngine":
from toil.batchSystems.gridengine import GridengineBatchSystem
batchSystemClass = GridengineBatchSystem
elif config.batchSystem == "lsf" or config.batchSystem == "LSF":
from toil.batchSystems.lsf import LSFBatchSystem
batchSystemClass = LSFBatchSystem
elif config.batchSystem == "mesos" or config.batchSystem == "Mesos":
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
batchSystemClass = MesosBatchSystem
kwargs["masterAddress"] = config.mesosMasterAddress
elif config.batchSystem == "slurm" or config.batchSystem == "Slurm":
from toil.batchSystems.slurm import SlurmBatchSystem
batchSystemClass = SlurmBatchSystem
else:
raise RuntimeError("Unrecognised batch system: %s" % config.batchSystem)
if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup():
raise RuntimeError(
"%s currently does not support shared caching. Set the "
"--disableCaching flag if you want to "
"use this batch system." % config.batchSystem
)
logger.info(
"Using the %s"
% re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()
)
return batchSystemClass(**kwargs)
|
def createBatchSystem(config, jobStore=None, userScript=None):
"""
Creates an instance of the batch system specified in the given config. If a job store and
a user script are given then the user script can be hot deployed into the workflow.
:param toil.common.Config config: the current configuration
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore: an instance of a jobStore
:param ModuleDescriptor userScript: a handle to the Python module defining the root job
:return: an instance of a concrete subclass of AbstractBatchSystem
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
kwargs = dict(
config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk,
)
if config.batchSystem == "parasol":
from toil.batchSystems.parasol import ParasolBatchSystem
batchSystemClass = ParasolBatchSystem
elif (
config.batchSystem == "single_machine" or config.batchSystem == "singleMachine"
):
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
batchSystemClass = SingleMachineBatchSystem
elif config.batchSystem == "gridengine" or config.batchSystem == "gridEngine":
from toil.batchSystems.gridengine import GridengineBatchSystem
batchSystemClass = GridengineBatchSystem
elif config.batchSystem == "lsf" or config.batchSystem == "LSF":
from toil.batchSystems.lsf import LSFBatchSystem
batchSystemClass = LSFBatchSystem
elif config.batchSystem == "mesos" or config.batchSystem == "Mesos":
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
batchSystemClass = MesosBatchSystem
kwargs["masterAddress"] = config.mesosMasterAddress
elif config.batchSystem == "slurm" or config.batchSystem == "Slurm":
from toil.batchSystems.slurm import SlurmBatchSystem
batchSystemClass = SlurmBatchSystem
else:
raise RuntimeError("Unrecognised batch system: %s" % config.batchSystem)
if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup():
raise RuntimeError(
"%s currently does not support shared caching. Set the "
"--disableCaching flag if you want to "
"use this batch system." % config.batchSystem
)
logger.info(
"Using the %s"
% re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()
)
if jobStore is not None:
if userScript is not None:
if (
not userScript.belongsToToil
and batchSystemClass.supportsHotDeployment()
):
userScriptResource = userScript.saveAsResourceTo(jobStore)
with jobStore.writeSharedFileStream("userScript") as f:
f.write(userScriptResource.pickle())
kwargs["userScript"] = userScriptResource
else:
from toil.jobStores.abstractJobStore import NoSuchFileException
try:
with jobStore.readSharedFileStream("userScript") as f:
userScriptResource = Resource.unpickle(f.read())
kwargs["userScript"] = userScriptResource
except NoSuchFileException:
pass
return batchSystemClass(**kwargs)
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def supportsHotDeployment(cls):
"""
Whether this batch system supports hot deployment of the user script itself. If it does,
the :meth:`setUserScript` can be invoked to set the resource object representing the user
script.
Note to implementors: If your implementation returns True here, it should also override
:rtype: bool
"""
raise NotImplementedError()
|
def supportsHotDeployment(cls):
"""
Whether this batch system supports hot deployment of the user script and toil itself. If
it does, the __init__ method will have to accept two optional parameters in addition to
the declared ones: userScript and toilDistribution. Both will be instances of
toil.common.HotDeployedResource that represent the user script and a source tarball (
sdist) of toil respectively.
:rtype: bool
"""
raise NotImplementedError()
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def issueBatchJob(self, command, memory, cores, disk, preemptable):
"""
Issues the following command returning a unique jobID. Command is the string to run, memory
is an int giving the number of bytes the job needs to run in and cores is the number of cpus
needed for the job and error-file is the path of the file to place any std-err/std-out in.
"""
self.checkResourceRequest(memory, cores, disk)
jobID = next(self.unusedJobID)
job = ToilJob(
jobID=jobID,
resources=ResourceRequirement(
memory=memory, cores=cores, disk=disk, preemptable=preemptable
),
command=command,
userScript=self.userScript,
environment=self.environment.copy(),
workerCleanupInfo=self.workerCleanupInfo,
)
jobType = job.resources
log.debug("Queueing the job command: %s with job id: %s ...", command, str(jobID))
self.jobQueues[jobType].append(job)
log.debug("... queued")
return jobID
|
def issueBatchJob(self, command, memory, cores, disk, preemptable):
"""
Issues the following command returning a unique jobID. Command is the string to run, memory
is an int giving the number of bytes the job needs to run in and cores is the number of cpus
needed for the job and error-file is the path of the file to place any std-err/std-out in.
"""
self.checkResourceRequest(memory, cores, disk)
jobID = next(self.unusedJobID)
job = ToilJob(
jobID=jobID,
resources=ResourceRequirement(
memory=memory, cores=cores, disk=disk, preemptable=preemptable
),
command=command,
userScript=self.userScript,
toilDistribution=self.toilDistribution,
environment=self.environment.copy(),
workerCleanupInfo=self.workerCleanupInfo,
)
jobType = job.resources
log.debug("Queueing the job command: %s with job id: %s ...", command, str(jobID))
self.jobQueues[jobType].append(job)
log.debug("... queued")
return jobID
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def __init__(self, options):
"""
Initialize a Toil object from the given options. Note that this is very light-weight and
that the bulk of the work is done when the context is entered.
:param argparse.Namespace options: command line options specified by the user
"""
super(Toil, self).__init__()
self.options = options
self.config = None
"""
:type: toil.common.Config
"""
self._jobStore = None
"""
:type: toil.jobStores.abstractJobStore.AbstractJobStore
"""
self._batchSystem = None
"""
:type: toil.batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
self._provisioner = None
"""
:type: toil.provisioners.abstractProvisioner.AbstractProvisioner
"""
self._jobCache = dict()
self._inContextManager = False
|
def __init__(self, options):
"""
Initialize a Toil object from the given options. Note that this is very light-weight and
that the bulk of the work is done when the context is entered.
:param argparse.Namespace options: command line options specified by the user
"""
super(Toil, self).__init__()
self.options = options
self.config = None
self._jobStore = None
self._batchSystem = None
self._provisioner = None
self._jobCache = dict()
self._inContextManager = False
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def create(cls, jobStore, leaderPath):
"""
Saves the content of the file or directory at the given path to the given job store
and returns a resource object representing that content for the purpose of obtaining it
again at a generic, public URL. This method should be invoked on the leader node.
:rtype: Resource
"""
pathHash = cls._pathHash(leaderPath)
contentHash = hashlib.md5()
# noinspection PyProtectedMember
with cls._load(leaderPath) as src:
with jobStore.writeSharedFileStream(
sharedFileName=pathHash, isProtected=False
) as dst:
userScript = src.read()
contentHash.update(userScript)
dst.write(userScript)
return cls(
name=os.path.basename(leaderPath),
pathHash=pathHash,
url=jobStore.getSharedPublicUrl(sharedFileName=pathHash),
contentHash=contentHash.hexdigest(),
)
|
def create(cls, jobStore, leaderPath):
"""
Saves the content of the file or directory at the given path to the given job store
and returns a resource object representing that content for the purpose of obtaining it
again at a generic, public URL. This method should be invoked on the leader node.
:rtype: Resource
"""
pathHash = cls._pathHash(leaderPath)
contentHash = hashlib.md5()
# noinspection PyProtectedMember
with cls._load(leaderPath) as src:
with jobStore.writeSharedFileStream(pathHash, isProtected=False) as dst:
userScript = src.read()
contentHash.update(userScript)
dst.write(userScript)
return cls(
name=os.path.basename(leaderPath),
pathHash=pathHash,
url=(jobStore.getSharedPublicUrl(pathHash)),
contentHash=contentHash.hexdigest(),
)
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def _load(cls, path):
"""
Returns a readable file-like object for the given path. If the path refers to a regular
file, this method returns the result of invoking open() on the given path. If the path
refers to a directory, this method returns a ZIP file with all files and subdirectories
in the directory at the given path.
:type path: str
:rtype: io.FileIO
"""
raise NotImplementedError()
|
def _load(cls, path):
"""
Returns a readable file-like object for the given path. If the path refers to a regular
file, this method returns the result of invoking open() on the given path. If the path
refers to a directory, this method returns a ZIP file with all files and subdirectories
in the directory at the given path.
:type path: str
:rtype: io.IOBase
"""
raise NotImplementedError()
|
https://github.com/DataBiosphere/toil/issues/1207
|
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Next available file descriptor: 10
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Parsed jobWrapper
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.worker:Got a command to run: _toil b5feca2d_3cbf_46a6_81cc_43d5f3a43cb6 /home/cmarkello/toilvenv/local/lib/python2.7/site-packages toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:toil.job:Loading user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline').
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't find resource for leader path '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages/toil_vg'
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline')
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', name='toil_vg.vg_evaluation_pipeline') from sys.path=['/home/cmarkello/toilvenv/bin', '/home/cmarkello/toilvenv/lib/python2.7', '/home/cmarkello/toilvenv/lib/python2.7/plat-x86_64-linux-gnu', '/home/cmarkello/toilvenv/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/lib/python2.7/lib-old', '/home/cmarkello/toilvenv/lib/python2.7/lib-dynload', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/home/cmarkello/toilvenv/local/lib/python2.7/site-packages', '/home/cmarkello/toil/src', '/home/cmarkello/toilvenv/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/home/cmarkello/toil-scripts/src', '/usr/local/lib/python2.7/site-packages', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/site-packages', '/usr/lib/python2.7/dist-packages']
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Traceback (most recent call last):
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/worker.py", line 316, in main
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/home/cmarkello/toil/src/toil/job.py", line 640, in _loadUserModuleWARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: return importlib.import_module(userModule.name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: __import__(name)
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ImportError: No module named toil_vg.vg_evaluation_pipeline
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host c1agent3
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:Setting read timeout to 65
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: DEBUG:urllib3.connectionpool:"GET /acmarkellotoilvgjobstore11xxjobitems(PartitionKey='default',RowKey='fc48619f_e141_4211_8ab9_3004f95b16df')?$select= HTTP/1.1" 200 None
WARNING:toil.leader:fc48619f_e141_4211_8ab9_3004f95b16df: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job fc48619f_e141_4211_8ab9_3004f95b16df to 0
|
ImportError
|
def getJobStore(cls, locator):
"""
Create an instance of the concrete job store implementation that matches the given locator.
:param str locator: The location of the job store to be represent by the instance
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: toil.jobStores.abstractJobStore.AbstractJobStore
"""
name, rest = cls.parseLocator(locator)
if name == "file":
from toil.jobStores.fileJobStore import FileJobStore
return FileJobStore(rest)
elif name == "aws":
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore(rest)
elif name == "azure":
from toil.jobStores.azureJobStore import AzureJobStore
return AzureJobStore(rest)
elif name == "google":
from toil.jobStores.googleJobStore import GoogleJobStore
projectID, namePrefix = rest.split(":", 1)
return GoogleJobStore(namePrefix, projectID)
else:
raise RuntimeError("Unknown job store implementation '%s'" % name)
|
def getJobStore(cls, locator):
"""
Create an instance of the concrete job store implementation that matches the given locator.
:param str locator: The location of the job store to be represent by the instance
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: toil.jobStores.abstractJobStore.AbstractJobStore
"""
name, rest = cls.parseLocator(locator)
if name == "file":
from toil.jobStores.fileJobStore import FileJobStore
return FileJobStore(rest)
elif name == "aws":
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore(rest)
elif name == "azure":
from toil.jobStores.azureJobStore import AzureJobStore
return AzureJobStore(rest)
elif name == "google":
from toil.jobStores.googleJobStore import GoogleJobStore
projectID, namePrefix = rest.split(":", 1)
return GoogleJobStore(namePrefix, projectID, config=config)
else:
raise RuntimeError("Unknown job store implementation '%s'" % name)
|
https://github.com/DataBiosphere/toil/issues/1127
|
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:Reporting file: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't find resource for leader path '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts'
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation')
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation') from sys.path=['/usr/local/bin', '/usr/local/lib/python2.7/dist-packages/pip-8.1.2-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts']
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Traceback (most recent call last):
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 316, in main
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 640, in _loadUserModule
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: return importlib.import_module(userModule.name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: __import__(name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ImportError: No module named parallelMappingEvaluation
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 52d0d400_4080_480d_9c53_e7a7624b20e9 to 0
WARNING:toil.leader:Job: 52d0d400_4080_480d_9c53_e7a7624b20e9 is completely failed
|
ImportError
|
def createBatchSystem(config, jobStore=None, userScript=None):
"""
Creates an instance of the batch system specified in the given config. If a job store and
a user script are given then the user script can be hot deployed into the workflow.
:param toil.common.Config config: the current configuration
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore: an instance of a jobStore
:param ModuleDescriptor userScript: a handle to the Python module defining the root job
:return: an instance of a concrete subclass of AbstractBatchSystem
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
kwargs = dict(
config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk,
)
if config.batchSystem == "parasol":
from toil.batchSystems.parasol import ParasolBatchSystem
batchSystemClass = ParasolBatchSystem
elif (
config.batchSystem == "single_machine" or config.batchSystem == "singleMachine"
):
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
batchSystemClass = SingleMachineBatchSystem
elif config.batchSystem == "gridengine" or config.batchSystem == "gridEngine":
from toil.batchSystems.gridengine import GridengineBatchSystem
batchSystemClass = GridengineBatchSystem
elif config.batchSystem == "lsf" or config.batchSystem == "LSF":
from toil.batchSystems.lsf import LSFBatchSystem
batchSystemClass = LSFBatchSystem
elif config.batchSystem == "mesos" or config.batchSystem == "Mesos":
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
batchSystemClass = MesosBatchSystem
kwargs["masterAddress"] = config.mesosMasterAddress
elif config.batchSystem == "slurm" or config.batchSystem == "Slurm":
from toil.batchSystems.slurm import SlurmBatchSystem
batchSystemClass = SlurmBatchSystem
else:
raise RuntimeError("Unrecognised batch system: %s" % config.batchSystem)
if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup():
raise RuntimeError(
"%s currently does not support shared caching. Set the "
"--disableCaching flag if you want to "
"use this batch system." % config.batchSystem
)
logger.info(
"Using the %s"
% re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()
)
if jobStore is not None:
if userScript is not None:
if (
not userScript.belongsToToil
and batchSystemClass.supportsHotDeployment()
):
userScriptResource = userScript.saveAsResourceTo(jobStore)
with jobStore.writeSharedFileStream("userScript") as f:
f.write(userScriptResource.pickle())
kwargs["userScript"] = userScriptResource
else:
from toil.jobStores.abstractJobStore import NoSuchFileException
try:
with jobStore.readSharedFileStream("userScript") as f:
userScriptResource = Resource.unpickle(f.read())
kwargs["userScript"] = userScriptResource
except NoSuchFileException:
pass
return batchSystemClass(**kwargs)
|
def createBatchSystem(config, jobStore=None, userScript=None):
"""
Creates an instance of the batch system specified in the given config. If a job store and
a user script are given then the user script can be hot deployed into the workflow.
:param toil.common.Config config: the current configuration
:param jobStores.abstractJobStore.AbstractJobStore jobStore: an instance of a jobStore
:param ModuleDescriptor userScript: a user supplied script to use for hot development
:return: an instance of a concrete subclass of AbstractBatchSystem
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
kwargs = dict(
config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk,
)
if config.batchSystem == "parasol":
from toil.batchSystems.parasol import ParasolBatchSystem
batchSystemClass = ParasolBatchSystem
elif (
config.batchSystem == "single_machine" or config.batchSystem == "singleMachine"
):
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
batchSystemClass = SingleMachineBatchSystem
elif config.batchSystem == "gridengine" or config.batchSystem == "gridEngine":
from toil.batchSystems.gridengine import GridengineBatchSystem
batchSystemClass = GridengineBatchSystem
elif config.batchSystem == "lsf" or config.batchSystem == "LSF":
from toil.batchSystems.lsf import LSFBatchSystem
batchSystemClass = LSFBatchSystem
elif config.batchSystem == "mesos" or config.batchSystem == "Mesos":
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
batchSystemClass = MesosBatchSystem
kwargs["masterAddress"] = config.mesosMasterAddress
elif config.batchSystem == "slurm" or config.batchSystem == "Slurm":
from toil.batchSystems.slurm import SlurmBatchSystem
batchSystemClass = SlurmBatchSystem
else:
raise RuntimeError("Unrecognised batch system: %s" % config.batchSystem)
if not config.disableCaching and not batchSystemClass.supportsWorkerCleanup():
raise RuntimeError(
"%s currently does not support shared caching. Set the "
"--disableCaching flag if you want to "
"use this batch system." % config.batchSystem
)
logger.info(
"Using the %s"
% re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()
)
if jobStore is not None and userScript is not None:
if not userScript.belongsToToil and batchSystemClass.supportsHotDeployment():
kwargs["userScript"] = userScript.saveAsResourceTo(jobStore)
return batchSystemClass(**kwargs)
|
https://github.com/DataBiosphere/toil/issues/1127
|
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:Reporting file: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't find resource for leader path '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts'
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation')
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation') from sys.path=['/usr/local/bin', '/usr/local/lib/python2.7/dist-packages/pip-8.1.2-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts']
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Traceback (most recent call last):
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 316, in main
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 640, in _loadUserModule
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: return importlib.import_module(userModule.name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: __import__(name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ImportError: No module named parallelMappingEvaluation
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 52d0d400_4080_480d_9c53_e7a7624b20e9 to 0
WARNING:toil.leader:Job: 52d0d400_4080_480d_9c53_e7a7624b20e9 is completely failed
|
ImportError
|
def register(self):
"""
Register this resource for later retrieval via lookup(), possibly in a child process.
"""
os.environ[self.resourceEnvNamePrefix + self.pathHash] = self.pickle()
|
def register(self):
"""
Register this resource for later retrieval via lookup(), possibly in a child process.
"""
os.environ[self.resourceEnvNamePrefix + self.pathHash] = self._pickle()
|
https://github.com/DataBiosphere/toil/issues/1127
|
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:Reporting file: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't find resource for leader path '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts'
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation')
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation') from sys.path=['/usr/local/bin', '/usr/local/lib/python2.7/dist-packages/pip-8.1.2-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts']
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Traceback (most recent call last):
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 316, in main
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 640, in _loadUserModule
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: return importlib.import_module(userModule.name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: __import__(name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ImportError: No module named parallelMappingEvaluation
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 52d0d400_4080_480d_9c53_e7a7624b20e9 to 0
WARNING:toil.leader:Job: 52d0d400_4080_480d_9c53_e7a7624b20e9 is completely failed
|
ImportError
|
def lookup(cls, leaderPath):
"""
Returns a resource object representing a resource created from a file or directory at the
given path on the leader. This method should be invoked on the worker. The given path
does not need to refer to an existing file or directory on the worker, it only identifies
the resource within an instance of toil. This method returns None if no resource for the
given path exists.
:rtype: Resource
"""
pathHash = cls._pathHash(leaderPath)
try:
s = os.environ[cls.resourceEnvNamePrefix + pathHash]
except KeyError:
log.warn("Can't find resource for leader path '%s'", leaderPath)
return None
else:
self = cls.unpickle(s)
assert self.pathHash == pathHash
return self
|
def lookup(cls, leaderPath):
"""
Returns a resource object representing a resource created from a file or directory at the
given path on the leader. This method should be invoked on the worker. The given path
does not need to refer to an existing file or directory on the worker, it only identifies
the resource within an instance of toil. This method returns None if no resource for the
given path exists.
:rtype: Resource
"""
pathHash = cls._pathHash(leaderPath)
try:
s = os.environ[cls.resourceEnvNamePrefix + pathHash]
except KeyError:
log.warn("Can't find resource for leader path '%s'", leaderPath)
return None
else:
self = cls._unpickle(s)
assert self.pathHash == pathHash
return self
|
https://github.com/DataBiosphere/toil/issues/1127
|
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:Reporting file: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't find resource for leader path '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts'
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation')
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation') from sys.path=['/usr/local/bin', '/usr/local/lib/python2.7/dist-packages/pip-8.1.2-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts']
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Traceback (most recent call last):
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 316, in main
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 640, in _loadUserModule
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: return importlib.import_module(userModule.name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: __import__(name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ImportError: No module named parallelMappingEvaluation
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 52d0d400_4080_480d_9c53_e7a7624b20e9 to 0
WARNING:toil.leader:Job: 52d0d400_4080_480d_9c53_e7a7624b20e9 is completely failed
|
ImportError
|
def getJobStore(locator):
"""
Create an instance of the concrete job store implementation that matches the given locator.
:param str locator: The location of the job store to be represent by the instance
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: toil.jobStores.abstractJobStore.AbstractJobStore
"""
if locator[0] in "/.":
locator = "file:" + locator
try:
name, rest = locator.split(":", 1)
except ValueError:
raise RuntimeError("Invalid job store locator syntax.")
if name == "file":
from toil.jobStores.fileJobStore import FileJobStore
return FileJobStore(rest)
elif name == "aws":
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore(rest)
elif name == "azure":
from toil.jobStores.azureJobStore import AzureJobStore
account, namePrefix = rest.split(":", 1)
return AzureJobStore(account, namePrefix, config=config)
elif name == "google":
from toil.jobStores.googleJobStore import GoogleJobStore
projectID, namePrefix = rest.split(":", 1)
return GoogleJobStore(namePrefix, projectID)
else:
raise RuntimeError("Unknown job store implementation '%s'" % name)
|
def getJobStore(locator):
"""
Create an instance of the concrete job store implementation that matches the given locator.
:param str locator: The location of the job store to be represent by the instance
:return: an instance of a concrete subclass of AbstractJobStore
:rtype: toil.jobStores.abstractJobStore.AbstractJobStore
"""
if locator[0] in "/.":
locator = "file:" + locator
try:
name, rest = locator.split(":", 1)
except ValueError:
raise RuntimeError("Invalid job store locator syntax.")
if name == "file":
from toil.jobStores.fileJobStore import FileJobStore
return FileJobStore(rest)
elif name == "aws":
from toil.jobStores.aws.jobStore import AWSJobStore
return AWSJobStore(rest)
elif name == "azure":
from toil.jobStores.azureJobStore import AzureJobStore
account, namePrefix = rest.split(":", 1)
return AzureJobStore(account, namePrefix, config=config)
elif name == "google":
from toil.jobStores.googleJobStore import GoogleJobStore
projectID, namePrefix = rest.split(":", 1)
return GoogleJobStore(namePrefix, projectID, config=config)
else:
raise RuntimeError("Unknown job store implementation '%s'" % name)
|
https://github.com/DataBiosphere/toil/issues/1127
|
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:Reporting file: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't find resource for leader path '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts'
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation')
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation') from sys.path=['/usr/local/bin', '/usr/local/lib/python2.7/dist-packages/pip-8.1.2-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts']
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Traceback (most recent call last):
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 316, in main
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 640, in _loadUserModule
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: return importlib.import_module(userModule.name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: __import__(name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ImportError: No module named parallelMappingEvaluation
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 52d0d400_4080_480d_9c53_e7a7624b20e9 to 0
WARNING:toil.leader:Job: 52d0d400_4080_480d_9c53_e7a7624b20e9 is completely failed
|
ImportError
|
def createBatchSystem(config, jobStore=None, userScript=None):
"""
Creates an instance of the batch system specified in the given config. If a job store and
a user script are given then the user script can be hot deployed into the workflow.
:param toil.common.Config config: the current configuration
:param toil.jobStores.abstractJobStore.AbstractJobStore jobStore: an instance of a jobStore
:param ModuleDescriptor userScript: a handle to the Python module defining the root job
:return: an instance of a concrete subclass of AbstractBatchSystem
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
kwargs = dict(
config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk,
)
if config.batchSystem == "parasol":
from toil.batchSystems.parasol import ParasolBatchSystem
batchSystemClass = ParasolBatchSystem
elif (
config.batchSystem == "single_machine" or config.batchSystem == "singleMachine"
):
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
batchSystemClass = SingleMachineBatchSystem
elif config.batchSystem == "gridengine" or config.batchSystem == "gridEngine":
from toil.batchSystems.gridengine import GridengineBatchSystem
batchSystemClass = GridengineBatchSystem
elif config.batchSystem == "lsf" or config.batchSystem == "LSF":
from toil.batchSystems.lsf import LSFBatchSystem
batchSystemClass = LSFBatchSystem
elif config.batchSystem == "mesos" or config.batchSystem == "Mesos":
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
batchSystemClass = MesosBatchSystem
kwargs["masterAddress"] = config.mesosMasterAddress
elif config.batchSystem == "slurm" or config.batchSystem == "Slurm":
from toil.batchSystems.slurm import SlurmBatchSystem
batchSystemClass = SlurmBatchSystem
else:
raise RuntimeError("Unrecognised batch system: %s" % config.batchSystem)
if not batchSystemClass.supportsWorkerCleanup():
raise RuntimeError(
"%s currently does not support shared caching. Use Toil version "
"3.1.6 along with the --disableSharedCache option if you want to "
"use this batch system." % config.batchSystem
)
logger.info(
"Using the %s"
% re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()
)
if jobStore is not None:
if userScript is not None:
if (
not userScript.belongsToToil
and batchSystemClass.supportsHotDeployment()
):
userScriptResource = userScript.saveAsResourceTo(jobStore)
with jobStore.writeSharedFileStream("userScript") as f:
f.write(userScriptResource.pickle())
kwargs["userScript"] = userScriptResource
else:
from toil.jobStores.abstractJobStore import NoSuchFileException
try:
with jobStore.readSharedFileStream("userScript") as f:
userScriptResource = Resource.unpickle(f.read())
kwargs["userScript"] = userScriptResource
except NoSuchFileException:
pass
return batchSystemClass(**kwargs)
|
def createBatchSystem(config, jobStore=None, userScript=None):
"""
Creates an instance of the batch system specified in the given config. If a job store and
a user script are given then the user script can be hot deployed into the workflow.
:param toil.common.Config config: the current configuration
:param jobStores.abstractJobStore.AbstractJobStore jobStore: an instance of a jobStore
:param ModuleDescriptor userScript: a user supplied script to use for hot development
:return: an instance of a concrete subclass of AbstractBatchSystem
:rtype: batchSystems.abstractBatchSystem.AbstractBatchSystem
"""
kwargs = dict(
config=config,
maxCores=config.maxCores,
maxMemory=config.maxMemory,
maxDisk=config.maxDisk,
)
if config.batchSystem == "parasol":
from toil.batchSystems.parasol import ParasolBatchSystem
batchSystemClass = ParasolBatchSystem
elif (
config.batchSystem == "single_machine" or config.batchSystem == "singleMachine"
):
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
batchSystemClass = SingleMachineBatchSystem
elif config.batchSystem == "gridengine" or config.batchSystem == "gridEngine":
from toil.batchSystems.gridengine import GridengineBatchSystem
batchSystemClass = GridengineBatchSystem
elif config.batchSystem == "lsf" or config.batchSystem == "LSF":
from toil.batchSystems.lsf import LSFBatchSystem
batchSystemClass = LSFBatchSystem
elif config.batchSystem == "mesos" or config.batchSystem == "Mesos":
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
batchSystemClass = MesosBatchSystem
kwargs["masterAddress"] = config.mesosMasterAddress
elif config.batchSystem == "slurm" or config.batchSystem == "Slurm":
from toil.batchSystems.slurm import SlurmBatchSystem
batchSystemClass = SlurmBatchSystem
else:
raise RuntimeError("Unrecognised batch system: %s" % config.batchSystem)
if not batchSystemClass.supportsWorkerCleanup():
raise RuntimeError(
"%s currently does not support shared caching. Use Toil version "
"3.1.6 along with the --disableSharedCache option if you want to "
"use this batch system." % config.batchSystem
)
logger.info(
"Using the %s"
% re.sub("([a-z])([A-Z])", "\g<1> \g<2>", batchSystemClass.__name__).lower()
)
if jobStore is not None and userScript is not None:
if not userScript.belongsToToil and batchSystemClass.supportsHotDeployment():
kwargs["userScript"] = userScript.saveAsResourceTo(jobStore)
return batchSystemClass(**kwargs)
|
https://github.com/DataBiosphere/toil/issues/1127
|
WARNING:toil.leader:The jobWrapper seems to have left a log file, indicating failure: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:Reporting file: 52d0d400_4080_480d_9c53_e7a7624b20e9
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ---TOIL WORKER OUTPUT LOG---
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't find resource for leader path '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts'
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.resource:Can't localize module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation')
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.job:Failed to import user module ModuleDescriptor(dirPath='/home/anovak/hgvm-graph-bakeoff-evaluations/scripts', name='parallelMappingEvaluation') from sys.path=['/usr/local/bin', '/usr/local/lib/python2.7/dist-packages/pip-8.1.2-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos.interface-0.22.0-py2.7.egg', '/usr/local/lib/python2.7/dist-packages/mesos-0.22.0-py2.7-linux-x86_64.egg', '/usr/lib/python2.7', '/usr/lib/python2.7/plat-x86_64-linux-gnu', '/usr/lib/python2.7/lib-tk', '/usr/lib/python2.7/lib-old', '/usr/lib/python2.7/lib-dynload', '/usr/local/lib/python2.7/dist-packages', '/usr/lib/python2.7/dist-packages', '/home/anovak/hgvm-graph-bakeoff-evaluations/scripts']
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Traceback (most recent call last):
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/worker.py", line 316, in main
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: job = Job._loadJob(jobWrapper.command, jobStore)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 666, in _loadJob
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: userModule = cls._loadUserModule(userModule)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/local/lib/python2.7/dist-packages/toil/job.py", line 640, in _loadUserModule
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: return importlib.import_module(userModule.name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: __import__(name)
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ImportError: No module named parallelMappingEvaluation
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: ERROR:toil.worker:Exiting the worker because of a failed jobWrapper on host amn2agent3
WARNING:toil.leader:52d0d400_4080_480d_9c53_e7a7624b20e9: WARNING:toil.jobWrapper:Due to failure we are reducing the remaining retry count of job 52d0d400_4080_480d_9c53_e7a7624b20e9 to 0
WARNING:toil.leader:Job: 52d0d400_4080_480d_9c53_e7a7624b20e9 is completely failed
|
ImportError
|
def processFinishedJob(self, jobBatchSystemID, resultStatus, wallTime=None):
"""
Function reads a processed jobWrapper file and updates it state.
"""
def processRemovedJob(jobStoreID):
if resultStatus != 0:
logger.warn(
"Despite the batch system claiming failure the "
"jobWrapper %s seems to have finished and been removed",
jobStoreID,
)
self._updatePredecessorStatus(jobStoreID)
if wallTime is not None and self.clusterScaler is not None:
issuedJob = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
self.clusterScaler.addCompletedJob(issuedJob, wallTime)
jobStoreID = self.removeJobID(jobBatchSystemID)
if self.jobStore.exists(jobStoreID):
logger.debug("Job %s continues to exist (i.e. has more to do)" % jobStoreID)
try:
jobWrapper = self.jobStore.load(jobStoreID)
except NoSuchJobException:
# Avoid importing AWSJobStore as the corresponding extra might be missing
if self.jobStore.__class__.__name__ == "AWSJobStore":
# We have a ghost job - the job has been deleted but a stale read from
# SDB gave us a false positive when we checked for its existence.
# Process the job from here as any other job removed from the job store.
# This is a temporary work around until https://github.com/BD2KGenomics/toil/issues/1091
# is completed
logger.warn("Got a stale read from SDB for job %s", jobStoreID)
processRemovedJob(jobStoreID)
return
else:
raise
if jobWrapper.logJobStoreFileID is not None:
logger.warn(
"The jobWrapper seems to have left a log file, indicating failure: %s",
jobStoreID,
)
with jobWrapper.getLogFileHandle(self.jobStore) as logFileStream:
logStream(logFileStream, jobStoreID, logger.warn)
if resultStatus != 0:
# If the batch system returned a non-zero exit code then the worker
# is assumed not to have captured the failure of the job, so we
# reduce the retry count here.
if jobWrapper.logJobStoreFileID is None:
logger.warn(
"No log file is present, despite jobWrapper failing: %s", jobStoreID
)
jobWrapper.setupJobAfterFailure(self.config)
self.jobStore.update(jobWrapper)
elif jobStoreID in self.toilState.hasFailedSuccessors:
# If the job has completed okay, we can remove it from the list of jobs with failed successors
self.toilState.hasFailedSuccessors.remove(jobStoreID)
self.toilState.updatedJobs.add((jobWrapper, resultStatus)) # Now we know the
# jobWrapper is done we can add it to the list of updated jobWrapper files
logger.debug("Added jobWrapper: %s to active jobs", jobStoreID)
else: # The jobWrapper is done
processRemovedJob(jobStoreID)
|
def processFinishedJob(self, jobBatchSystemID, resultStatus, wallTime=None):
"""
Function reads a processed jobWrapper file and updates it state.
"""
def processRemovedJob(jobStoreID):
if resultStatus != 0:
logger.warn(
"Despite the batch system claiming failure the "
"jobWrapper %s seems to have finished and been removed",
jobStoreID,
)
self._updatePredecessorStatus(jobStoreID)
if wallTime is not None and self.clusterScaler is not None:
issuedJob = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID]
self.clusterScaler.addCompletedJob(issuedJob, wallTime)
jobStoreID = self.removeJobID(jobBatchSystemID)
if self.jobStore.exists(jobStoreID):
logger.debug("Job %s continues to exist (i.e. has more to do)" % jobStoreID)
try:
jobWrapper = self.jobStore.load(jobStoreID)
except NoSuchJobException:
if isinstance(self.jobStore, AWSJobStore):
# We have a ghost job - the job has been deleted but a stale read from
# SDB gave us a false positive when we checked for its existence.
# Process the job from here as any other job removed from the job store.
# This is a temporary work around until https://github.com/BD2KGenomics/toil/issues/1091
# is completed
logger.warn("Got a stale read from SDB for job %s", jobStoreID)
processRemovedJob(jobStoreID)
return
else:
raise
if jobWrapper.logJobStoreFileID is not None:
logger.warn(
"The jobWrapper seems to have left a log file, indicating failure: %s",
jobStoreID,
)
with jobWrapper.getLogFileHandle(self.jobStore) as logFileStream:
logStream(logFileStream, jobStoreID, logger.warn)
if resultStatus != 0:
# If the batch system returned a non-zero exit code then the worker
# is assumed not to have captured the failure of the job, so we
# reduce the retry count here.
if jobWrapper.logJobStoreFileID is None:
logger.warn(
"No log file is present, despite jobWrapper failing: %s", jobStoreID
)
jobWrapper.setupJobAfterFailure(self.config)
self.jobStore.update(jobWrapper)
elif jobStoreID in self.toilState.hasFailedSuccessors:
# If the job has completed okay, we can remove it from the list of jobs with failed successors
self.toilState.hasFailedSuccessors.remove(jobStoreID)
self.toilState.updatedJobs.add((jobWrapper, resultStatus)) # Now we know the
# jobWrapper is done we can add it to the list of updated jobWrapper files
logger.debug("Added jobWrapper: %s to active jobs", jobStoreID)
else: # The jobWrapper is done
processRemovedJob(jobStoreID)
|
https://github.com/DataBiosphere/toil/issues/1115
|
(venv) jenkins@ip-172-31-29-42:~/toil$ make develop
…
(venv) jenkins@ip-172-31-29-42:~/toil$ python HelloWorld.py --batchSystem gridEngine --disableCaching --logDebug ./jobStore
ip-172-31-29-42 2016-08-23 21:52:06,214 MainThread INFO toil.lib.bioio: Logging set at level: DEBUG
ip-172-31-29-42 2016-08-23 21:52:06,214 MainThread INFO toil.lib.bioio: Logging set at level: DEBUG
ip-172-31-29-42 2016-08-23 21:52:06,223 MainThread INFO toil.jobStores.fileJobStore: Path to job store directory is '/home/jenkins/toil/jobStore'.
ip-172-31-29-42 2016-08-23 21:52:06,223 MainThread INFO toil.jobStores.abstractJobStore: The workflow ID is: '0a91cea1-30dc-4968-a5f1-930414702a2d'
ip-172-31-29-42 2016-08-23 21:52:06,265 MainThread INFO toil.common: Using the gridengine batch system
ip-172-31-29-42 2016-08-23 21:52:06,334 Thread-1 DEBUG toil.batchSystems.gridengine: List of running jobs: set([])
ip-172-31-29-42 2016-08-23 21:52:06,334 MainThread INFO toil.common: Written the environment for the jobs to the environment file
ip-172-31-29-42 2016-08-23 21:52:06,334 MainThread INFO toil.common: Caching all jobs in job store
ip-172-31-29-42 2016-08-23 21:52:06,334 MainThread INFO toil.common: 0 jobs downloaded.
ip-172-31-29-42 2016-08-23 21:52:06,336 Thread-1 DEBUG toil.batchSystems.gridengine: No activity, sleeping for 1s
ip-172-31-29-42 2016-08-23 21:52:06,353 MainThread INFO toil.realtimeLogger: Real-time logging disabled
ip-172-31-29-42 2016-08-23 21:52:06,367 MainThread DEBUG toil.common: Shutting down batch system ...
ip-172-31-29-42 2016-08-23 21:52:07,338 Thread-1 DEBUG toil.batchSystems.gridengine: Received queue sentinel.
ip-172-31-29-42 2016-08-23 21:52:07,338 MainThread DEBUG toil.common: ... finished shutting down the batch system in 0.970496177673 seconds.
Traceback (most recent call last):
File "HelloWorld.py", line 11, in <module>
print Job.Runner.startToil(j, options)
File "/home/jenkins/toil/src/toil/job.py", line 542, in startToil
return toil.start(job)
File "/home/jenkins/toil/src/toil/common.py", line 580, in start
return self._runMainLoop(job)
File "/home/jenkins/toil/src/toil/common.py", line 788, in _runMainLoop
from toil.leader import mainLoop
File "/home/jenkins/toil/src/toil/leader.py", line 32, in <module>
from toil.jobStores.aws.jobStore import AWSJobStore
File "/home/jenkins/toil/src/toil/jobStores/aws/jobStore.py", line 34, in <module>
from boto.sdb.domain import Domain
ImportError: No module named boto.sdb.domain
|
ImportError
|
def __init__(self):
# Core options
self.workflowID = None
"""This attribute uniquely identifies the job store and therefore the workflow. It is
necessary in order to distinguish between two consequitive workflows for which
self.jobStore is the same, e.g. when a job store name is reused after a previous run has
finished sucessfully and its job store has been clean up."""
self.workflowAttemptNumber = None
self.jobStore = os.path.abspath("./toil")
self.logLevel = getLogLevelString()
self.workDir = None
self.stats = False
# Because the stats option needs the jobStore to persist past the end of the run,
# the clean default value depends the specified stats option and is determined in setOptions
self.clean = None
self.cleanWorkDir = None
# Restarting the workflow options
self.restart = False
# Batch system options
self.batchSystem = "singleMachine"
self.scale = 1
self.mesosMasterAddress = "localhost:5050"
self.parasolCommand = "parasol"
self.parasolMaxBatches = 10000
self.environment = {}
# Autoscaling options
self.provisioner = None
self.preemptableNodeType = None
self.preemptableNodeOptions = None
self.preemptableBidPrice = None
self.minPreemptableNodes = 0
self.maxPreemptableNodes = 10
self.nodeType = None
self.nodeOptions = None
self.minNodes = 0
self.maxNodes = 10
self.alphaPacking = 0.8
self.betaInertia = 1.2
self.scaleInterval = 360
# Resource requirements
self.defaultMemory = 2147483648
self.defaultCores = 1
self.defaultDisk = 2147483648
self.readGlobalFileMutableByDefault = False
self.defaultPreemptable = False
self.maxCores = sys.maxint
self.maxMemory = sys.maxint
self.maxDisk = sys.maxint
# Retrying/rescuing jobs
self.retryCount = 0
self.maxJobDuration = sys.maxint
self.rescueJobsFrequency = 3600
# Misc
self.maxLogFileSize = 50120
self.sseKey = None
self.cseKey = None
self.servicePollingInterval = 60
self.useAsync = True
# Debug options
self.badWorker = 0.0
self.badWorkerFailInterval = 0.01
|
def __init__(self):
# Core options
self.workflowID = None
"""This attribute uniquely identifies the job store and therefore the workflow. It is
necessary in order to distinguish between two consequitive workflows for which
self.jobStore is the same, e.g. when a job store name is reused after a previous run has
finished sucessfully and its job store has been clean up."""
self.workflowAttemptNumber = 0
self.jobStore = os.path.abspath("./toil")
self.logLevel = getLogLevelString()
self.workDir = None
self.stats = False
# Because the stats option needs the jobStore to persist past the end of the run,
# the clean default value depends the specified stats option and is determined in setOptions
self.clean = None
self.cleanWorkDir = None
# Restarting the workflow options
self.restart = False
# Batch system options
self.batchSystem = "singleMachine"
self.scale = 1
self.mesosMasterAddress = "localhost:5050"
self.parasolCommand = "parasol"
self.parasolMaxBatches = 10000
self.environment = {}
# Autoscaling options
self.provisioner = None
self.preemptableNodeType = None
self.preemptableNodeOptions = None
self.preemptableBidPrice = None
self.minPreemptableNodes = 0
self.maxPreemptableNodes = 10
self.nodeType = None
self.nodeOptions = None
self.minNodes = 0
self.maxNodes = 10
self.alphaPacking = 0.8
self.betaInertia = 1.2
self.scaleInterval = 360
# Resource requirements
self.defaultMemory = 2147483648
self.defaultCores = 1
self.defaultDisk = 2147483648
self.readGlobalFileMutableByDefault = False
self.defaultPreemptable = False
self.maxCores = sys.maxint
self.maxMemory = sys.maxint
self.maxDisk = sys.maxint
# Retrying/rescuing jobs
self.retryCount = 0
self.maxJobDuration = sys.maxint
self.rescueJobsFrequency = 3600
# Misc
self.maxLogFileSize = 50120
self.sseKey = None
self.cseKey = None
self.servicePollingInterval = 60
self.useAsync = True
# Debug options
self.badWorker = 0.0
self.badWorkerFailInterval = 0.01
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
def _addOptions(addGroupFn, config):
#
# Core options
#
addOptionFn = addGroupFn(
"toil core options",
"Options to specify the location of the Toil workflow and turn on "
"stats collation about the performance of jobs.",
)
addOptionFn(
"jobStore",
type=str,
help="The location of the job store for the workflow. " + jobStoreLocatorHelp,
)
addOptionFn(
"--workDir",
dest="workDir",
default=None,
help="Absolute path to directory where temporary files generated during the Toil "
"run should be placed. Temp files and folders will be placed in a directory "
"toil-<workflowID> within workDir (The workflowID is generated by Toil and "
"will be reported in the workflow logs. Default is determined by the "
"user-defined environmental variable TOIL_TEMPDIR, or the environment "
"variables (TMPDIR, TEMP, TMP) via mkdtemp. This directory needs to exist on "
"all machines running jobs.",
)
addOptionFn(
"--stats",
dest="stats",
action="store_true",
default=None,
help="Records statistics about the toil workflow to be used by 'toil stats'.",
)
addOptionFn(
"--clean",
dest="clean",
choices=["always", "onError", "never", "onSuccess"],
default=None,
help=(
"Determines the deletion of the jobStore upon completion of the program. "
"Choices: 'always', 'onError','never', 'onSuccess'. The --stats option requires "
"information from the jobStore upon completion so the jobStore will never be deleted with"
"that flag. If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
"Default is 'never' if stats is enabled, and 'onSuccess' otherwise"
),
)
addOptionFn(
"--cleanWorkDir",
dest="cleanWorkDir",
choices=["always", "never", "onSuccess", "onError"],
default="always",
help=(
"Determines deletion of temporary worker directory upon completion of a job. Choices: 'always', "
"'never', 'onSuccess'. Default = always. WARNING: This option should be changed for debugging "
"only. Running a full pipeline with this option could fill your disk with intermediate data."
),
)
#
# Restarting the workflow options
#
addOptionFn = addGroupFn(
"toil options for restarting an existing workflow",
"Allows the restart of an existing workflow",
)
addOptionFn(
"--restart",
dest="restart",
default=None,
action="store_true",
help="If --restart is specified then will attempt to restart existing workflow "
"at the location pointed to by the --jobStore option. Will raise an exception if the workflow does not exist",
)
#
# Batch system options
#
addOptionFn = addGroupFn(
"toil options for specifying the batch system",
"Allows the specification of the batch system, and arguments to the batch system/big batch system (see below).",
)
addOptionFn(
"--batchSystem",
dest="batchSystem",
default=None,
help=(
"The type of batch system to run the job(s) with, currently can be one "
"of singleMachine, parasol, gridEngine, lsf or mesos'. default=%s"
% config.batchSystem
),
)
addOptionFn(
"--scale",
dest="scale",
default=None,
help=(
"A scaling factor to change the value of all submitted tasks's submitted cores. "
"Used in singleMachine batch system. default=%s" % config.scale
),
)
addOptionFn(
"--mesosMaster",
dest="mesosMasterAddress",
default=None,
help=(
"The host and port of the Mesos master separated by colon. default=%s"
% config.mesosMasterAddress
),
)
addOptionFn(
"--parasolCommand",
dest="parasolCommand",
default=None,
help="The name or path of the parasol program. Will be looked up on PATH "
"unless it starts with a slashdefault=%s" % config.parasolCommand,
)
addOptionFn(
"--parasolMaxBatches",
dest="parasolMaxBatches",
default=None,
help="Maximum number of job batches the Parasol batch is allowed to create. One "
"batch is created for jobs with a a unique set of resource requirements. "
"default=%i" % config.parasolMaxBatches,
)
#
# Auto scaling options
#
addOptionFn = addGroupFn(
"toil options for autoscaling the cluster of worker nodes",
"Allows the specification of the minimum and maximum number of nodes "
"in an autoscaled cluster, as well as parameters to control the "
"level of provisioning.",
)
addOptionFn(
"--provisioner",
dest="provisioner",
choices=["cgcloud"],
help="The provisioner for cluster auto-scaling. Currently only the cgcloud "
"provisioner exists. The default is %s." % config.provisioner,
)
for preemptable in (False, True):
def _addOptionFn(*name, **kwargs):
name = list(name)
if preemptable:
name.insert(-1, "preemptable")
name = "".join(
(s[0].upper() + s[1:]) if i else s for i, s in enumerate(name)
)
terms = re.compile(r"\{([^{}]+)\}")
_help = kwargs.pop("help")
_help = "".join(
(term.split("|") * 2)[int(preemptable)] for term in terms.split(_help)
)
addOptionFn(
"--" + name,
dest=name,
help=_help + " The default is %s." % getattr(config, name),
**kwargs,
)
_addOptionFn(
"nodeType",
metavar="TYPE",
help="Node type for {non-|}preemptable nodes. The syntax depends on the "
"provisioner used. For the cgcloud provisioner this is the name of an "
"EC2 instance type{|, followed by a colon and the price in dollar to "
"bid for a spot instance}, for example 'c3.8xlarge{|:0.42}'.",
)
_addOptionFn(
"nodeOptions",
metavar="OPTIONS",
help="Provisioning options for the {non-|}preemptable node type. The syntax "
"depends on the provisioner used. For the cgcloud provisioner this is a "
"space-separated list of options to cgcloud's grow-cluster command (run "
"'cgcloud grow-cluster --help' for details.",
)
for p, q in [("min", "Minimum"), ("max", "Maximum")]:
_addOptionFn(
p,
"nodes",
default=None,
metavar="NUM",
help=q + " number of {non-|}preemptable nodes in the cluster, if using "
"auto-scaling.",
)
# TODO: DESCRIBE THE FOLLOWING TWO PARAMETERS
addOptionFn(
"--alphaPacking",
dest="alphaPacking",
default=None,
help=(" default=%s" % config.alphaPacking),
)
addOptionFn(
"--betaInertia",
dest="betaInertia",
default=None,
help=(" default=%s" % config.betaInertia),
)
addOptionFn(
"--scaleInterval",
dest="scaleInterval",
default=None,
help=(
"The interval (seconds) between assessing if the scale of"
" the cluster needs to change. default=%s" % config.scaleInterval
),
)
#
# Resource requirements
#
addOptionFn = addGroupFn(
"toil options for cores/memory requirements",
"The options to specify default cores/memory requirements (if not "
"specified by the jobs themselves), and to limit the total amount of "
"memory/cores requested from the batch system.",
)
addOptionFn(
"--defaultMemory",
dest="defaultMemory",
default=None,
metavar="INT",
help="The default amount of memory to request for a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Standard "
"suffixes like K, Ki, M, Mi, G or Gi are supported. Default is %s"
% bytes2human(config.defaultMemory, symbols="iec"),
)
addOptionFn(
"--defaultCores",
dest="defaultCores",
default=None,
metavar="FLOAT",
help="The default number of CPU cores to dedicate a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Fractions of a "
"core (for example 0.1) are supported on some batch systems, namely Mesos "
"and singleMachine. Default is %.1f " % config.defaultCores,
)
addOptionFn(
"--defaultDisk",
dest="defaultDisk",
default=None,
metavar="INT",
help="The default amount of disk space to dedicate a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Standard "
"suffixes like K, Ki, M, Mi, G or Gi are supported. Default is %s"
% bytes2human(config.defaultDisk, symbols="iec"),
)
addOptionFn(
"--readGlobalFileMutableByDefault",
dest="readGlobalFileMutableByDefault",
action="store_true",
default=None,
help="Toil disallows modification of read "
"global files by default. This flag makes "
"it makes read file mutable by default, "
"however it also defeats the purpose of "
"shared caching via hard links to save "
"space. Default is False",
)
addOptionFn(
"--maxCores",
dest="maxCores",
default=None,
metavar="INT",
help="The maximum number of CPU cores to request from the batch system at any one "
"time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. Default "
"is %s" % bytes2human(config.maxCores, symbols="iec"),
)
addOptionFn(
"--maxMemory",
dest="maxMemory",
default=None,
metavar="INT",
help="The maximum amount of memory to request from the batch system at any one "
"time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. Default "
"is %s" % bytes2human(config.maxMemory, symbols="iec"),
)
addOptionFn(
"--maxDisk",
dest="maxDisk",
default=None,
metavar="INT",
help="The maximum amount of disk space to request from the batch system at any "
"one time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. "
"Default is %s" % bytes2human(config.maxDisk, symbols="iec"),
)
#
# Retrying/rescuing jobs
#
addOptionFn = addGroupFn(
"toil options for rescuing/killing/restarting jobs",
"The options for jobs that either run too long/fail or get lost \
(some batch systems have issues!)",
)
addOptionFn(
"--retryCount",
dest="retryCount",
default=None,
help=(
"Number of times to retry a failing job before giving up and "
"labeling job failed. default=%s" % config.retryCount
),
)
addOptionFn(
"--maxJobDuration",
dest="maxJobDuration",
default=None,
help=(
"Maximum runtime of a job (in seconds) before we kill it "
"(this is a lower bound, and the actual time before killing "
"the job may be longer). default=%s" % config.maxJobDuration
),
)
addOptionFn(
"--rescueJobsFrequency",
dest="rescueJobsFrequency",
default=None,
help=(
"Period of time to wait (in seconds) between checking for "
"missing/overlong jobs, that is jobs which get lost by the batch system. Expert parameter. default=%s"
% config.rescueJobsFrequency
),
)
#
# Misc options
#
addOptionFn = addGroupFn("toil miscellaneous options", "Miscellaneous options")
addOptionFn(
"--maxLogFileSize",
dest="maxLogFileSize",
default=None,
help=(
"The maximum size of a job log file to keep (in bytes), log files larger "
"than this will be truncated to the last X bytes. Default is 50 "
"kilobytes, default=%s" % config.maxLogFileSize
),
)
addOptionFn(
"--realTimeLogging",
dest="realTimeLogging",
action="store_true",
default=False,
help="Enable real-time logging from workers to masters",
)
addOptionFn(
"--sseKey",
dest="sseKey",
default=None,
help="Path to file containing 32 character key to be used for server-side encryption on awsJobStore. SSE will "
"not be used if this flag is not passed.",
)
addOptionFn(
"--cseKey",
dest="cseKey",
default=None,
help="Path to file containing 256-bit key to be used for client-side encryption on "
"azureJobStore. By default, no encryption is used.",
)
addOptionFn(
"--setEnv",
"-e",
metavar="NAME=VALUE or NAME",
dest="environment",
default=[],
action="append",
help="Set an environment variable early on in the worker. If VALUE is omitted, "
"it will be looked up in the current environment. Independently of this "
"option, the worker will try to emulate the leader's environment before "
"running a job. Using this option, a variable can be injected into the "
"worker process itself before it is started.",
)
addOptionFn(
"--servicePollingInterval",
dest="servicePollingInterval",
default=None,
help="Interval of time service jobs wait between polling for the existence"
" of the keep-alive flag (defailt=%s)" % config.servicePollingInterval,
)
#
# Debug options
#
addOptionFn = addGroupFn("toil debug options", "Debug options")
addOptionFn(
"--badWorker",
dest="badWorker",
default=None,
help=(
"For testing purposes randomly kill 'badWorker' proportion of jobs using SIGKILL, default=%s"
% config.badWorker
),
)
addOptionFn(
"--badWorkerFailInterval",
dest="badWorkerFailInterval",
default=None,
help=(
"When killing the job pick uniformly within the interval from 0.0 to "
"'badWorkerFailInterval' seconds after the worker starts, default=%s"
% config.badWorkerFailInterval
),
)
|
def _addOptions(addGroupFn, config):
#
# Core options
#
addOptionFn = addGroupFn(
"toil core options",
"Options to specify the \
location of the toil workflow and turn on stats collation about the performance of jobs.",
)
addOptionFn(
"jobStore",
type=str,
help=(
"Store in which to place job management files and the global accessed "
"temporary files. Job store locator strings should be formatted as follows\n"
"aws:<AWS region>:<name prefix>\n"
"azure:<account>:<name prefix>'\n"
"google:<project id>:<name prefix>\n"
"file:<file path>\n"
"Note that for backwards compatibility ./foo is equivalent to file:/foo and "
"/bar is equivalent to file:/bar.\n"
"(If this is a file path this needs to be globally accessible by all machines"
" running jobs).\n"
"If the store already exists and restart is false a JobStoreCreationException"
" exception will be thrown."
),
)
addOptionFn(
"--workDir",
dest="workDir",
default=None,
help="Absolute path to directory where temporary files generated during the Toil "
"run should be placed. Temp files and folders will be placed in a directory "
"toil-<workflowID> within workDir (The workflowID is generated by Toil and "
"will be reported in the workflow logs. Default is determined by the "
"user-defined environmental variable TOIL_TEMPDIR, or the environment "
"variables (TMPDIR, TEMP, TMP) via mkdtemp. This directory needs to exist on "
"all machines running jobs.",
)
addOptionFn(
"--stats",
dest="stats",
action="store_true",
default=None,
help="Records statistics about the toil workflow to be used by 'toil stats'.",
)
addOptionFn(
"--clean",
dest="clean",
choices=["always", "onError", "never", "onSuccess"],
default=None,
help=(
"Determines the deletion of the jobStore upon completion of the program. "
"Choices: 'always', 'onError','never', 'onSuccess'. The --stats option requires "
"information from the jobStore upon completion so the jobStore will never be deleted with"
"that flag. If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
"Default is 'never' if stats is enabled, and 'onSuccess' otherwise"
),
)
addOptionFn(
"--cleanWorkDir",
dest="cleanWorkDir",
choices=["always", "never", "onSuccess", "onError"],
default="always",
help=(
"Determines deletion of temporary worker directory upon completion of a job. Choices: 'always', "
"'never', 'onSuccess'. Default = always. WARNING: This option should be changed for debugging "
"only. Running a full pipeline with this option could fill your disk with intermediate data."
),
)
#
# Restarting the workflow options
#
addOptionFn = addGroupFn(
"toil options for restarting an existing workflow",
"Allows the restart of an existing workflow",
)
addOptionFn(
"--restart",
dest="restart",
default=None,
action="store_true",
help="If --restart is specified then will attempt to restart existing workflow "
"at the location pointed to by the --jobStore option. Will raise an exception if the workflow does not exist",
)
#
# Batch system options
#
addOptionFn = addGroupFn(
"toil options for specifying the batch system",
"Allows the specification of the batch system, and arguments to the batch system/big batch system (see below).",
)
addOptionFn(
"--batchSystem",
dest="batchSystem",
default=None,
help=(
"The type of batch system to run the job(s) with, currently can be one "
"of singleMachine, parasol, gridEngine, lsf or mesos'. default=%s"
% config.batchSystem
),
)
addOptionFn(
"--scale",
dest="scale",
default=None,
help=(
"A scaling factor to change the value of all submitted tasks's submitted cores. "
"Used in singleMachine batch system. default=%s" % config.scale
),
)
addOptionFn(
"--mesosMaster",
dest="mesosMasterAddress",
default=None,
help=(
"The host and port of the Mesos master separated by colon. default=%s"
% config.mesosMasterAddress
),
)
addOptionFn(
"--parasolCommand",
dest="parasolCommand",
default=None,
help="The name or path of the parasol program. Will be looked up on PATH "
"unless it starts with a slashdefault=%s" % config.parasolCommand,
)
addOptionFn(
"--parasolMaxBatches",
dest="parasolMaxBatches",
default=None,
help="Maximum number of job batches the Parasol batch is allowed to create. One "
"batch is created for jobs with a a unique set of resource requirements. "
"default=%i" % config.parasolMaxBatches,
)
#
# Auto scaling options
#
addOptionFn = addGroupFn(
"toil options for autoscaling the cluster of worker nodes",
"Allows the specification of the minimum and maximum number of nodes "
"in an autoscaled cluster, as well as parameters to control the "
"level of provisioning.",
)
addOptionFn(
"--provisioner",
dest="provisioner",
choices=["cgcloud"],
help="The provisioner for cluster auto-scaling. Currently only the cgcloud "
"provisioner exists. The default is %s." % config.provisioner,
)
for preemptable in (False, True):
def _addOptionFn(*name, **kwargs):
name = list(name)
if preemptable:
name.insert(-1, "preemptable")
name = "".join(
(s[0].upper() + s[1:]) if i else s for i, s in enumerate(name)
)
terms = re.compile(r"\{([^{}]+)\}")
_help = kwargs.pop("help")
_help = "".join(
(term.split("|") * 2)[int(preemptable)] for term in terms.split(_help)
)
addOptionFn(
"--" + name,
dest=name,
help=_help + " The default is %s." % getattr(config, name),
**kwargs,
)
_addOptionFn(
"nodeType",
metavar="TYPE",
help="Node type for {non-|}preemptable nodes. The syntax depends on the "
"provisioner used. For the cgcloud provisioner this is the name of an "
"EC2 instance type{|, followed by a colon and the price in dollar to "
"bid for a spot instance}, for example 'c3.8xlarge{|:0.42}'.",
)
_addOptionFn(
"nodeOptions",
metavar="OPTIONS",
help="Provisioning options for the {non-|}preemptable node type. The syntax "
"depends on the provisioner used. For the cgcloud provisioner this is a "
"space-separated list of options to cgcloud's grow-cluster command (run "
"'cgcloud grow-cluster --help' for details.",
)
for p, q in [("min", "Minimum"), ("max", "Maximum")]:
_addOptionFn(
p,
"nodes",
default=None,
metavar="NUM",
help=q + " number of {non-|}preemptable nodes in the cluster, if using "
"auto-scaling.",
)
# TODO: DESCRIBE THE FOLLOWING TWO PARAMETERS
addOptionFn(
"--alphaPacking",
dest="alphaPacking",
default=None,
help=(" default=%s" % config.alphaPacking),
)
addOptionFn(
"--betaInertia",
dest="betaInertia",
default=None,
help=(" default=%s" % config.betaInertia),
)
addOptionFn(
"--scaleInterval",
dest="scaleInterval",
default=None,
help=(
"The interval (seconds) between assessing if the scale of"
" the cluster needs to change. default=%s" % config.scaleInterval
),
)
#
# Resource requirements
#
addOptionFn = addGroupFn(
"toil options for cores/memory requirements",
"The options to specify default cores/memory requirements (if not "
"specified by the jobs themselves), and to limit the total amount of "
"memory/cores requested from the batch system.",
)
addOptionFn(
"--defaultMemory",
dest="defaultMemory",
default=None,
metavar="INT",
help="The default amount of memory to request for a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Standard "
"suffixes like K, Ki, M, Mi, G or Gi are supported. Default is %s"
% bytes2human(config.defaultMemory, symbols="iec"),
)
addOptionFn(
"--defaultCores",
dest="defaultCores",
default=None,
metavar="FLOAT",
help="The default number of CPU cores to dedicate a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Fractions of a "
"core (for example 0.1) are supported on some batch systems, namely Mesos "
"and singleMachine. Default is %.1f " % config.defaultCores,
)
addOptionFn(
"--defaultDisk",
dest="defaultDisk",
default=None,
metavar="INT",
help="The default amount of disk space to dedicate a job. Only applicable to jobs "
"that do not specify an explicit value for this requirement. Standard "
"suffixes like K, Ki, M, Mi, G or Gi are supported. Default is %s"
% bytes2human(config.defaultDisk, symbols="iec"),
)
addOptionFn(
"--readGlobalFileMutableByDefault",
dest="readGlobalFileMutableByDefault",
action="store_true",
default=None,
help="Toil disallows modification of read "
"global files by default. This flag makes "
"it makes read file mutable by default, "
"however it also defeats the purpose of "
"shared caching via hard links to save "
"space. Default is False",
)
addOptionFn(
"--maxCores",
dest="maxCores",
default=None,
metavar="INT",
help="The maximum number of CPU cores to request from the batch system at any one "
"time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. Default "
"is %s" % bytes2human(config.maxCores, symbols="iec"),
)
addOptionFn(
"--maxMemory",
dest="maxMemory",
default=None,
metavar="INT",
help="The maximum amount of memory to request from the batch system at any one "
"time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. Default "
"is %s" % bytes2human(config.maxMemory, symbols="iec"),
)
addOptionFn(
"--maxDisk",
dest="maxDisk",
default=None,
metavar="INT",
help="The maximum amount of disk space to request from the batch system at any "
"one time. Standard suffixes like K, Ki, M, Mi, G or Gi are supported. "
"Default is %s" % bytes2human(config.maxDisk, symbols="iec"),
)
#
# Retrying/rescuing jobs
#
addOptionFn = addGroupFn(
"toil options for rescuing/killing/restarting jobs",
"The options for jobs that either run too long/fail or get lost \
(some batch systems have issues!)",
)
addOptionFn(
"--retryCount",
dest="retryCount",
default=None,
help=(
"Number of times to retry a failing job before giving up and "
"labeling job failed. default=%s" % config.retryCount
),
)
addOptionFn(
"--maxJobDuration",
dest="maxJobDuration",
default=None,
help=(
"Maximum runtime of a job (in seconds) before we kill it "
"(this is a lower bound, and the actual time before killing "
"the job may be longer). default=%s" % config.maxJobDuration
),
)
addOptionFn(
"--rescueJobsFrequency",
dest="rescueJobsFrequency",
default=None,
help=(
"Period of time to wait (in seconds) between checking for "
"missing/overlong jobs, that is jobs which get lost by the batch system. Expert parameter. default=%s"
% config.rescueJobsFrequency
),
)
#
# Misc options
#
addOptionFn = addGroupFn("toil miscellaneous options", "Miscellaneous options")
addOptionFn(
"--maxLogFileSize",
dest="maxLogFileSize",
default=None,
help=(
"The maximum size of a job log file to keep (in bytes), log files larger "
"than this will be truncated to the last X bytes. Default is 50 "
"kilobytes, default=%s" % config.maxLogFileSize
),
)
addOptionFn(
"--realTimeLogging",
dest="realTimeLogging",
action="store_true",
default=False,
help="Enable real-time logging from workers to masters",
)
addOptionFn(
"--sseKey",
dest="sseKey",
default=None,
help="Path to file containing 32 character key to be used for server-side encryption on awsJobStore. SSE will "
"not be used if this flag is not passed.",
)
addOptionFn(
"--cseKey",
dest="cseKey",
default=None,
help="Path to file containing 256-bit key to be used for client-side encryption on "
"azureJobStore. By default, no encryption is used.",
)
addOptionFn(
"--setEnv",
"-e",
metavar="NAME=VALUE or NAME",
dest="environment",
default=[],
action="append",
help="Set an environment variable early on in the worker. If VALUE is omitted, "
"it will be looked up in the current environment. Independently of this "
"option, the worker will try to emulate the leader's environment before "
"running a job. Using this option, a variable can be injected into the "
"worker process itself before it is started.",
)
addOptionFn(
"--servicePollingInterval",
dest="servicePollingInterval",
default=None,
help="Interval of time service jobs wait between polling for the existence"
" of the keep-alive flag (defailt=%s)" % config.servicePollingInterval,
)
#
# Debug options
#
addOptionFn = addGroupFn("toil debug options", "Debug options")
addOptionFn(
"--badWorker",
dest="badWorker",
default=None,
help=(
"For testing purposes randomly kill 'badWorker' proportion of jobs using SIGKILL, default=%s"
% config.badWorker
),
)
addOptionFn(
"--badWorkerFailInterval",
dest="badWorkerFailInterval",
default=None,
help=(
"When killing the job pick uniformly within the interval from 0.0 to "
"'badWorkerFailInterval' seconds after the worker starts, default=%s"
% config.badWorkerFailInterval
),
)
|
https://github.com/DataBiosphere/toil/issues/869
|
cmarkello@c1master1:~$ toil clean azure:hgvm:hgvmevaljobstore
2016-05-13 23:08:36,438 INFO:toil.lib.bioio: Logging set at level: INFO
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Parsed arguments
2016-05-13 23:08:36,445 INFO:toil.utils.toilClean: Checking if we have files for toil
2016-05-13 23:08:36,525 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.table.core.windows.net
2016-05-13 23:08:36,724 INFO:requests.packages.urllib3.connectionpool: Starting new HTTPS connection (1): hgvm.blob.core.windows.net
Traceback (most recent call last):
File "/usr/local/bin/toil", line 9, in <module>
load_entry_point('toil==3.2.0a2', 'console_scripts', 'toil')()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilMain.py", line 30, in main
module.main()
File "/usr/local/lib/python2.7/dist-packages/toil/utils/toilClean.py", line 53, in main
jobStore = Toil.loadOrCreateJobStore(options.jobStore)
File "/usr/local/lib/python2.7/dist-packages/toil/common.py", line 589, in loadOrCreateJobStore
return AzureJobStore(account, namePrefix, config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 120, in __init__
super(AzureJobStore, self).__init__(config=config)
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/abstractJobStore.py", line 141, in __init__
with self.readSharedFileStream("config.pickle") as fileHandle:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/usr/local/lib/python2.7/dist-packages/toil/jobStores/azureJobStore.py", line 318, in readSharedFileStream
raise NoSuchFileException(sharedFileID)
toil.jobStores.abstractJobStore.NoSuchFileException: File 'a77581f6_ad82_5142_acb7_5a6b01eab3fb' does not exist
|
toil.jobStores.abstractJobStore.NoSuchFileException
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.