after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
async def jsonrpc_collection_update(
    self,
    claim_id,
    bid=None,
    channel_id=None,
    channel_name=None,
    channel_account_id=None,
    clear_channel=False,
    account_id=None,
    wallet_id=None,
    claim_address=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
    replace=False,
    **kwargs,
):
    """
    Update an existing collection claim.
    Usage:
        collection_update (<claim_id> | --claim_id=<claim_id>) [--bid=<bid>]
                          [--claims=<claims>...] [--clear_claims]
                          [--title=<title>] [--description=<description>]
                          [--tags=<tags>...] [--clear_tags]
                          [--languages=<languages>...] [--clear_languages]
                          [--locations=<locations>...] [--clear_locations]
                          [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
                          [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                          [--claim_address=<claim_address>] [--new_signing_key]
                          [--funding_account_ids=<funding_account_ids>...]
                          [--preview] [--blocking] [--replace]
    Options:
        --claim_id=<claim_id>          : (str) claim_id of the collection to update
        --bid=<bid>                    : (decimal) amount to back the claim
        --claims=<claims>              : (list) claim ids
        --clear_claims                 : (bool) clear existing claim references (prior to adding new ones)
        --title=<title>                : (str) title of the collection
        --description=<description>    : (str) description of the collection
        --tags=<tags>                  : (list) add content tags
        --clear_tags                   : (bool) clear existing tags (prior to adding new ones)
        --languages=<languages>        : (list) languages used by the collection,
                                         using RFC 5646 format, eg:
                                         for English `--languages=en`
                                         for Spanish (Spain) `--languages=es-ES`
                                         for Spanish (Mexican) `--languages=es-MX`
                                         for Chinese (Simplified) `--languages=zh-Hans`
                                         for Chinese (Traditional) `--languages=zh-Hant`
        --clear_languages              : (bool) clear existing languages (prior to adding new ones)
        --locations=<locations>        : (list) locations of the collection, consisting of 2 letter
                                         `country` code and a `state`, `city` and a postal
                                         `code` along with a `latitude` and `longitude`.
                                         for JSON RPC: pass a dictionary with aforementioned
                                         attributes as keys, eg:
                                         ...
                                         "locations": [{'country': 'US', 'state': 'NH'}]
                                         ...
                                         for command line: pass a colon delimited list
                                         with values in the following order:
                                         "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                         making sure to include colon for blank values, for
                                         example to provide only the city:
                                         ... --locations="::Manchester"
                                         with all values set:
                                         ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                         optionally, you can just pass the "LATITUDE:LONGITUDE":
                                         ... --locations="42.990605:-71.460989"
                                         finally, you can also pass JSON string of dictionary
                                         on the command line as you would via JSON RPC
                                         ... --locations="{'country': 'US', 'state': 'NH'}"
        --clear_locations              : (bool) clear existing locations (prior to adding new ones)
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --account_id=<account_id>      : (str) account in which to look for collection (default: all)
        --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the collection is sent
        --new_signing_key              : (bool) generate a new signing key, will invalidate all previous publishes
        --preview                      : (bool) do not broadcast the transaction
        --blocking                     : (bool) wait until transaction is in mempool
        --replace                      : (bool) instead of modifying specific values on
                                         the collection, this will clear all existing values
                                         and only save passed in values, useful for form
                                         submissions where all values are always set
    Returns: {Transaction}
    """
    # NOTE(review): the Usage/Options text above looks docopt-style and is
    # presumably parsed to build the CLI — confirm before editing it.
    # NOTE(review): ``clear_channel`` is accepted but not listed in the
    # docstring options — TODO confirm whether it should be documented.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Restrict the lookup to one account when requested; otherwise search all
    # accounts in the wallet (the default account is still used below for
    # address derivation).
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts
    existing_collections = await self.ledger.get_collections(
        wallet=wallet, accounts=accounts, claim_id=claim_id
    )
    # The claim_id must identify exactly one collection in these accounts.
    if len(existing_collections) != 1:
        account_ids = ", ".join(f"'{account.id}'" for account in accounts)
        raise Exception(
            f"Can't find the collection '{claim_id}' in account(s) {account_ids}."
        )
    old_txo = existing_collections[0]
    if not old_txo.claim.is_collection:
        raise Exception(
            f"A claim with id '{claim_id}' was found but it is not a collection."
        )
    # Unspecified bid/address fall back to the previous claim's values.
    if bid is not None:
        amount = self.get_dewies_or_error("bid", bid, positive_value=True)
    else:
        amount = old_txo.amount
    if claim_address is not None:
        self.valid_address_or_error(claim_address)
    else:
        claim_address = old_txo.get_address(account.ledger)
    # Re-sign with an explicitly requested channel, or keep the existing
    # channel signature unless the caller is clearing it or replacing the claim.
    channel = None
    if channel_id or channel_name:
        channel = await self.get_channel_or_error(
            wallet, channel_account_id, channel_id, channel_name, for_signing=True
        )
    elif old_txo.claim.is_signed and not clear_channel and not replace:
        channel = old_txo.channel
    if replace:
        # Start from an empty claim, carrying over only the source metadata,
        # then apply the submitted values on top.
        claim = Claim()
        claim.collection.message.source.CopyFrom(
            old_txo.claim.collection.message.source
        )
        claim.collection.update(**kwargs)
    else:
        # Copy the existing claim and merge the submitted values into it.
        claim = Claim.from_bytes(old_txo.claim.to_bytes())
        claim.collection.update(**kwargs)
    tx = await Transaction.claim_update(
        old_txo,
        claim,
        amount,
        claim_address,
        funding_accounts,
        funding_accounts[0],
        channel,
    )
    new_txo = tx.outputs[0]
    # Regenerate the output script after the claim body changed, sign with the
    # channel (if any), then sign the funding inputs.
    new_txo.script.generate()
    if channel:
        new_txo.sign(channel)
    await tx.sign(funding_accounts)
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Fire-and-forget analytics: the RPC response is not delayed by it and
        # any failure in the task is not surfaced here.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("publish")
        )
    else:
        # Preview only: release the outputs that were reserved while funding.
        await account.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_collection_update(
    self,
    claim_id,
    bid=None,
    channel_id=None,
    channel_name=None,
    channel_account_id=None,
    clear_channel=False,
    account_id=None,
    wallet_id=None,
    claim_address=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
    replace=False,
    **kwargs,
):
    """
    Update an existing collection claim.
    Usage:
        collection_update (<claim_id> | --claim_id=<claim_id>) [--bid=<bid>]
                          [--claims=<claims>...] [--clear_claims]
                          [--title=<title>] [--description=<description>]
                          [--tags=<tags>...] [--clear_tags]
                          [--languages=<languages>...] [--clear_languages]
                          [--locations=<locations>...] [--clear_locations]
                          [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
                          [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                          [--claim_address=<claim_address>] [--new_signing_key]
                          [--funding_account_ids=<funding_account_ids>...]
                          [--preview] [--blocking] [--replace]
    Options:
        --claim_id=<claim_id>          : (str) claim_id of the collection to update
        --bid=<bid>                    : (decimal) amount to back the claim
        --claims=<claims>              : (list) claim ids
        --clear_claims                 : (bool) clear existing claim references (prior to adding new ones)
        --title=<title>                : (str) title of the collection
        --description=<description>    : (str) description of the collection
        --tags=<tags>                  : (list) add content tags
        --clear_tags                   : (bool) clear existing tags (prior to adding new ones)
        --languages=<languages>        : (list) languages used by the collection,
                                         using RFC 5646 format, eg:
                                         for English `--languages=en`
                                         for Spanish (Spain) `--languages=es-ES`
                                         for Spanish (Mexican) `--languages=es-MX`
                                         for Chinese (Simplified) `--languages=zh-Hans`
                                         for Chinese (Traditional) `--languages=zh-Hant`
        --clear_languages              : (bool) clear existing languages (prior to adding new ones)
        --locations=<locations>        : (list) locations of the collection, consisting of 2 letter
                                         `country` code and a `state`, `city` and a postal
                                         `code` along with a `latitude` and `longitude`.
                                         for JSON RPC: pass a dictionary with aforementioned
                                         attributes as keys, eg:
                                         ...
                                         "locations": [{'country': 'US', 'state': 'NH'}]
                                         ...
                                         for command line: pass a colon delimited list
                                         with values in the following order:
                                         "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
                                         making sure to include colon for blank values, for
                                         example to provide only the city:
                                         ... --locations="::Manchester"
                                         with all values set:
                                         ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
                                         optionally, you can just pass the "LATITUDE:LONGITUDE":
                                         ... --locations="42.990605:-71.460989"
                                         finally, you can also pass JSON string of dictionary
                                         on the command line as you would via JSON RPC
                                         ... --locations="{'country': 'US', 'state': 'NH'}"
        --clear_locations              : (bool) clear existing locations (prior to adding new ones)
        --thumbnail_url=<thumbnail_url>: (str) thumbnail url
        --account_id=<account_id>      : (str) account in which to look for collection (default: all)
        --wallet_id=<wallet_id>        : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --claim_address=<claim_address>: (str) address where the collection is sent
        --new_signing_key              : (bool) generate a new signing key, will invalidate all previous publishes
        --preview                      : (bool) do not broadcast the transaction
        --blocking                     : (bool) wait until transaction is in mempool
        --replace                      : (bool) instead of modifying specific values on
                                         the collection, this will clear all existing values
                                         and only save passed in values, useful for form
                                         submissions where all values are always set
    Returns: {Transaction}
    """
    # NOTE(review): the Usage/Options text above looks docopt-style and is
    # presumably parsed to build the CLI — confirm before editing it.
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    # Restrict the lookup to one account when requested; otherwise search all
    # accounts in the wallet.
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts
    existing_collections = await self.ledger.get_collections(
        wallet=wallet, accounts=accounts, claim_id=claim_id
    )
    # The claim_id must identify exactly one collection in these accounts.
    if len(existing_collections) != 1:
        account_ids = ", ".join(f"'{account.id}'" for account in accounts)
        raise Exception(
            f"Can't find the collection '{claim_id}' in account(s) {account_ids}."
        )
    old_txo = existing_collections[0]
    if not old_txo.claim.is_collection:
        raise Exception(
            f"A claim with id '{claim_id}' was found but it is not a collection."
        )
    # Unspecified bid/address fall back to the previous claim's values.
    if bid is not None:
        amount = self.get_dewies_or_error("bid", bid, positive_value=True)
    else:
        amount = old_txo.amount
    if claim_address is not None:
        self.valid_address_or_error(claim_address)
    else:
        claim_address = old_txo.get_address(account.ledger)
    # Re-sign with an explicitly requested channel, or keep the existing
    # channel signature unless the caller is clearing it or replacing the claim.
    channel = None
    if channel_id or channel_name:
        channel = await self.get_channel_or_error(
            wallet, channel_account_id, channel_id, channel_name, for_signing=True
        )
    elif old_txo.claim.is_signed and not clear_channel and not replace:
        channel = old_txo.channel
    if replace:
        # Start from an empty claim, carrying over only the source metadata,
        # then apply the submitted values on top.
        claim = Claim()
        claim.collection.message.source.CopyFrom(
            old_txo.claim.collection.message.source
        )
        claim.collection.update(**kwargs)
    else:
        # Copy the existing claim and merge the submitted values into it.
        claim = Claim.from_bytes(old_txo.claim.to_bytes())
        claim.collection.update(**kwargs)
    tx = await Transaction.claim_update(
        old_txo,
        claim,
        amount,
        claim_address,
        funding_accounts,
        funding_accounts[0],
        channel,
    )
    new_txo = tx.outputs[0]
    # Regenerate the output script after the claim body changed, sign with the
    # channel (if any), then sign the funding inputs.
    new_txo.script.generate()
    if channel:
        new_txo.sign(channel)
    await tx.sign(funding_accounts)
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # NOTE(review): the RPC response waits here until the analytics call
        # completes — a slow analytics endpoint delays the caller.
        await self.analytics_manager.send_claim_action("publish")
    else:
        # Preview only: release the outputs that were reserved while funding.
        await account.ledger.release_tx(tx)
    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_support_create(
    self,
    claim_id,
    amount,
    tip=False,
    account_id=None,
    wallet_id=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
):
    """
    Create a support or a tip for name claim.
    Usage:
        support_create (<claim_id> | --claim_id=<claim_id>) (<amount> | --amount=<amount>)
                       [--tip] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                       [--preview] [--blocking] [--funding_account_ids=<funding_account_ids>...]
    Options:
        --claim_id=<claim_id>     : (str) claim_id of the claim to support
        --amount=<amount>         : (decimal) amount of support
        --tip                     : (bool) send support to claim owner, default: false.
        --account_id=<account_id> : (str) account to use for holding the transaction
        --wallet_id=<wallet_id>   : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --preview                 : (bool) do not broadcast the transaction
        --blocking                : (bool) wait until transaction is in mempool
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    # NOTE(review): ``assert`` is stripped under ``python -O`` — if this check
    # must always run, raise an explicit exception instead.
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    amount = self.get_dewies_or_error("amount", amount)
    claim = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
    # A tip pays to the claim owner's address; a plain support pays back to an
    # address in our own account so the amount stays spendable by us.
    claim_address = claim.get_address(self.ledger)
    if not tip:
        account = wallet.get_account_or_default(account_id)
        claim_address = await account.receiving.get_or_create_usable_address()
    tx = await Transaction.support(
        claim.claim_name,
        claim_id,
        amount,
        claim_address,
        funding_accounts,
        funding_accounts[0],
    )
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Persist the support locally only after a successful broadcast.
        await self.storage.save_supports(
            {
                claim_id: [
                    {
                        "txid": tx.id,
                        "nout": tx.position,
                        "address": claim_address,
                        "claim_id": claim_id,
                        "amount": dewies_to_lbc(amount),
                    }
                ]
            }
        )
        # Fire-and-forget analytics: does not delay the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("new_support")
        )
    else:
        # Preview only: release the outputs reserved while funding the tx.
        await self.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_support_create(
    self,
    claim_id,
    amount,
    tip=False,
    account_id=None,
    wallet_id=None,
    funding_account_ids=None,
    preview=False,
    blocking=False,
):
    """
    Create a support or a tip for name claim.
    Usage:
        support_create (<claim_id> | --claim_id=<claim_id>) (<amount> | --amount=<amount>)
                       [--tip] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                       [--preview] [--blocking] [--funding_account_ids=<funding_account_ids>...]
    Options:
        --claim_id=<claim_id>     : (str) claim_id of the claim to support
        --amount=<amount>         : (decimal) amount of support
        --tip                     : (bool) send support to claim owner, default: false.
        --account_id=<account_id> : (str) account to use for holding the transaction
        --wallet_id=<wallet_id>   : (str) restrict operation to specific wallet
        --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
        --preview                 : (bool) do not broadcast the transaction
        --blocking                : (bool) wait until transaction is in mempool
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    # NOTE(review): ``assert`` is stripped under ``python -O`` — if this check
    # must always run, raise an explicit exception instead.
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
    amount = self.get_dewies_or_error("amount", amount)
    claim = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
    # A tip pays to the claim owner's address; a plain support pays back to an
    # address in our own account so the amount stays spendable by us.
    claim_address = claim.get_address(self.ledger)
    if not tip:
        account = wallet.get_account_or_default(account_id)
        claim_address = await account.receiving.get_or_create_usable_address()
    tx = await Transaction.support(
        claim.claim_name,
        claim_id,
        amount,
        claim_address,
        funding_accounts,
        funding_accounts[0],
    )
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Persist the support locally only after a successful broadcast.
        await self.storage.save_supports(
            {
                claim_id: [
                    {
                        "txid": tx.id,
                        "nout": tx.position,
                        "address": claim_address,
                        "claim_id": claim_id,
                        "amount": dewies_to_lbc(amount),
                    }
                ]
            }
        )
        # NOTE(review): the RPC response waits here until the analytics call
        # completes — a slow analytics endpoint delays the caller.
        await self.analytics_manager.send_claim_action("new_support")
    else:
        # Preview only: release the outputs reserved while funding the tx.
        await self.ledger.release_tx(tx)
    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_support_abandon(
    self,
    claim_id=None,
    txid=None,
    nout=None,
    keep=None,
    account_id=None,
    wallet_id=None,
    preview=False,
    blocking=False,
):
    """
    Abandon supports, including tips, of a specific claim, optionally
    keeping some amount as supports.
    Usage:
        support_abandon [--claim_id=<claim_id>] [(--txid=<txid> --nout=<nout>)] [--keep=<keep>]
                        [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                        [--preview] [--blocking]
    Options:
        --claim_id=<claim_id>     : (str) claim_id of the support to abandon
        --txid=<txid>             : (str) txid of the claim to abandon
        --nout=<nout>             : (int) nout of the claim to abandon
        --keep=<keep>             : (decimal) amount of lbc to keep as support
        --account_id=<account_id> : (str) id of the account to use
        --wallet_id=<wallet_id>   : (str) restrict operation to specific wallet
        --preview                 : (bool) do not broadcast the transaction
        --blocking                : (bool) wait until abandon is in mempool
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    # NOTE(review): ``assert`` is stripped under ``python -O`` — if this check
    # must always run, raise an explicit exception instead.
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    # Restrict the lookup to one account when requested; otherwise search all
    # accounts in the wallet.
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts
    # A specific (txid, nout) pair takes precedence over a claim_id lookup.
    if txid is not None and nout is not None:
        supports = await self.ledger.get_supports(
            wallet=wallet, accounts=accounts, **{"txo.txid": txid, "txo.position": nout}
        )
    elif claim_id is not None:
        supports = await self.ledger.get_supports(
            wallet=wallet, accounts=accounts, claim_id=claim_id
        )
    else:
        raise Exception("Must specify claim_id, or txid and nout")
    if not supports:
        raise Exception("No supports found for the specified claim_id or txid:nout")
    if keep is not None:
        keep = self.get_dewies_or_error("keep", keep)
    else:
        keep = 0
    # Abandoning spends every matched support; when ``keep`` is positive a new
    # support output for that amount is created so part of the value remains.
    outputs = []
    if keep > 0:
        outputs = [
            Output.pay_support_pubkey_hash(
                keep,
                supports[0].claim_name,
                supports[0].claim_id,
                supports[0].pubkey_hash,
            )
        ]
    tx = await Transaction.create(
        [Input.spend(txo) for txo in supports], outputs, accounts, account
    )
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # Fire-and-forget analytics: does not delay the RPC response.
        self.component_manager.loop.create_task(
            self.analytics_manager.send_claim_action("abandon")
        )
    else:
        # Preview only: release the outputs reserved while building the tx.
        await self.ledger.release_tx(tx)
    return tx
|
async def jsonrpc_support_abandon(
    self,
    claim_id=None,
    txid=None,
    nout=None,
    keep=None,
    account_id=None,
    wallet_id=None,
    preview=False,
    blocking=False,
):
    """
    Abandon supports, including tips, of a specific claim, optionally
    keeping some amount as supports.
    Usage:
        support_abandon [--claim_id=<claim_id>] [(--txid=<txid> --nout=<nout>)] [--keep=<keep>]
                        [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                        [--preview] [--blocking]
    Options:
        --claim_id=<claim_id>     : (str) claim_id of the support to abandon
        --txid=<txid>             : (str) txid of the claim to abandon
        --nout=<nout>             : (int) nout of the claim to abandon
        --keep=<keep>             : (decimal) amount of lbc to keep as support
        --account_id=<account_id> : (str) id of the account to use
        --wallet_id=<wallet_id>   : (str) restrict operation to specific wallet
        --preview                 : (bool) do not broadcast the transaction
        --blocking                : (bool) wait until abandon is in mempool
    Returns: {Transaction}
    """
    wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
    # NOTE(review): ``assert`` is stripped under ``python -O`` — if this check
    # must always run, raise an explicit exception instead.
    assert not wallet.is_locked, "Cannot spend funds with locked wallet, unlock first."
    # Restrict the lookup to one account when requested; otherwise search all
    # accounts in the wallet.
    if account_id:
        account = wallet.get_account_or_error(account_id)
        accounts = [account]
    else:
        account = wallet.default_account
        accounts = wallet.accounts
    # A specific (txid, nout) pair takes precedence over a claim_id lookup.
    if txid is not None and nout is not None:
        supports = await self.ledger.get_supports(
            wallet=wallet, accounts=accounts, **{"txo.txid": txid, "txo.position": nout}
        )
    elif claim_id is not None:
        supports = await self.ledger.get_supports(
            wallet=wallet, accounts=accounts, claim_id=claim_id
        )
    else:
        raise Exception("Must specify claim_id, or txid and nout")
    if not supports:
        raise Exception("No supports found for the specified claim_id or txid:nout")
    if keep is not None:
        keep = self.get_dewies_or_error("keep", keep)
    else:
        keep = 0
    # Abandoning spends every matched support; when ``keep`` is positive a new
    # support output for that amount is created so part of the value remains.
    outputs = []
    if keep > 0:
        outputs = [
            Output.pay_support_pubkey_hash(
                keep,
                supports[0].claim_name,
                supports[0].claim_id,
                supports[0].pubkey_hash,
            )
        ]
    tx = await Transaction.create(
        [Input.spend(txo) for txo in supports], outputs, accounts, account
    )
    if not preview:
        await self.broadcast_or_release(tx, blocking)
        # NOTE(review): the RPC response waits here until the analytics call
        # completes — a slow analytics endpoint delays the caller.
        await self.analytics_manager.send_claim_action("abandon")
    else:
        # Preview only: release the outputs reserved while building the tx.
        await self.ledger.release_tx(tx)
    return tx
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def jsonrpc_file_reflect(self, **kwargs):
    """
    Reflect all the blobs in a file matching the filter criteria
    Usage:
        file_reflect [--sd_hash=<sd_hash>] [--file_name=<file_name>]
                     [--stream_hash=<stream_hash>] [--rowid=<rowid>]
                     [--reflector=<reflector>]
    Options:
        --sd_hash=<sd_hash>          : (str) get file with matching sd hash
        --file_name=<file_name>      : (str) get file with matching file name in the
                                       downloads folder
        --stream_hash=<stream_hash>  : (str) get file with matching stream hash
        --rowid=<rowid>              : (int) get file with matching row id
        --reflector=<reflector>      : (str) reflector server, ip address or url
                                       by default choose a server from the config
    Returns:
        (list) list of blobs reflected
    """
    # Use the caller-supplied endpoint when both pieces were given; otherwise
    # fall back to a randomly chosen reflector server from the config.
    server = kwargs.get("server")
    port = kwargs.get("port")
    if not (server and port):
        server, port = random.choice(self.conf.reflector_servers)
    else:
        port = int(port)
    # Kick off one reflect task per matching stream and run them concurrently.
    streams = self.stream_manager.get_filtered_streams(**kwargs)
    per_stream_blobs = await asyncio.gather(
        *[self.stream_manager.reflect_stream(stream, server, port) for stream in streams]
    )
    # Flatten the per-stream blob lists into a single combined list.
    return [blob for blobs in per_stream_blobs for blob in blobs]
|
async def jsonrpc_file_reflect(self, **kwargs):
    """
    Reflect all the blobs in a file matching the filter criteria
    Usage:
        file_reflect [--sd_hash=<sd_hash>] [--file_name=<file_name>]
                     [--stream_hash=<stream_hash>] [--rowid=<rowid>]
                     [--reflector=<reflector>]
    Options:
        --sd_hash=<sd_hash>          : (str) get file with matching sd hash
        --file_name=<file_name>      : (str) get file with matching file name in the
                                       downloads folder
        --stream_hash=<stream_hash>  : (str) get file with matching stream hash
        --rowid=<rowid>              : (int) get file with matching row id
        --reflector=<reflector>      : (str) reflector server, ip address or url
                                       by default choose a server from the config
    Returns:
        (list) list of blobs reflected
    """
    # Use the caller-supplied endpoint when both pieces were given; otherwise
    # fall back to a randomly chosen reflector server from the config.
    server = kwargs.get("server")
    port = kwargs.get("port")
    if not (server and port):
        server, port = random.choice(self.conf.reflector_servers)
    else:
        port = int(port)
    # Each matching stream uploads itself to the reflector; run concurrently.
    streams = self.stream_manager.get_filtered_streams(**kwargs)
    per_stream_blobs = await asyncio.gather(
        *[stream.upload_to_reflector(server, port) for stream in streams]
    )
    # Flatten the per-stream blob lists into a single combined list.
    return [blob for blobs in per_stream_blobs for blob in blobs]
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
def __init__(
    self,
    loop: asyncio.AbstractEventLoop,
    config: "Config",
    blob_manager: "BlobManager",
    wallet_manager: "WalletManager",
    storage: "SQLiteStorage",
    node: Optional["Node"],
    analytics_manager: Optional["AnalyticsManager"] = None,
):
    """
    Store references to collaborating components and initialise the
    stream manager's mutable bookkeeping state.
    """
    # Injected collaborators.
    self.loop = loop
    self.config = config
    self.blob_manager = blob_manager
    self.wallet_manager = wallet_manager
    self.storage = storage
    self.node = node
    self.analytics_manager = analytics_manager
    # Streams currently managed, keyed by sd_hash (presumably — TODO confirm).
    self.streams: typing.Dict[str, ManagedStream] = {}
    # Background tasks; created later, None until then.
    self.resume_saving_task: Optional[asyncio.Task] = None
    self.re_reflect_task: Optional[asyncio.Task] = None
    self.update_stream_finished_futs: typing.List[asyncio.Future] = []
    # In-flight reflector uploads keyed by sd_hash, so duplicate uploads of
    # the same stream can be detected.
    self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
    # NOTE(review): the explicit ``loop=`` argument to asyncio primitives is
    # deprecated and removed in Python 3.10+.
    self.started = asyncio.Event(loop=self.loop)
|
def __init__(
    self,
    loop: asyncio.AbstractEventLoop,
    config: "Config",
    blob_manager: "BlobManager",
    wallet_manager: "WalletManager",
    storage: "SQLiteStorage",
    node: Optional["Node"],
    analytics_manager: Optional["AnalyticsManager"] = None,
):
    """
    Store references to collaborating components and initialise the
    stream manager's mutable bookkeeping state.
    """
    # Injected collaborators.
    self.loop = loop
    self.config = config
    self.blob_manager = blob_manager
    self.wallet_manager = wallet_manager
    self.storage = storage
    self.node = node
    self.analytics_manager = analytics_manager
    # Streams currently managed, keyed by sd_hash (presumably — TODO confirm).
    self.streams: typing.Dict[str, ManagedStream] = {}
    # Background tasks; created later, None until then.
    self.resume_saving_task: Optional[asyncio.Task] = None
    self.re_reflect_task: Optional[asyncio.Task] = None
    self.update_stream_finished_futs: typing.List[asyncio.Future] = []
    # NOTE(review): a plain list gives no way to check whether a particular
    # stream is already uploading — a dict keyed by sd_hash would allow that.
    self.running_reflector_uploads: typing.List[asyncio.Task] = []
    # NOTE(review): the explicit ``loop=`` argument to asyncio primitives is
    # deprecated and removed in Python 3.10+.
    self.started = asyncio.Event(loop=self.loop)
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def reflect_streams(self):
    """
    Long-running background loop that periodically re-uploads local streams
    to a reflector.

    Every 300 seconds, when reflecting is enabled and servers are configured,
    queues an upload for each verified, fully-downloaded stream that is not
    already uploading and not yet fully reflected, running at most
    ``concurrent_reflector_uploads`` uploads at a time. Never returns.
    """
    while True:
        if self.config.reflect_streams and self.config.reflector_servers:
            due = await self.storage.get_streams_to_re_reflect()
            # Only consider streams this manager is currently tracking.
            candidates = [sd_hash for sd_hash in due if sd_hash in self.streams]
            batch = []
            while candidates:
                stream = self.streams[candidates.pop()]
                eligible = (
                    self.blob_manager.is_blob_verified(stream.sd_hash)
                    and stream.blobs_completed
                    and stream.sd_hash not in self.running_reflector_uploads
                    and not stream.fully_reflected.is_set()
                )
                if eligible:
                    batch.append(self.reflect_stream(stream))
                # Flush a full batch before queueing any further uploads.
                if len(batch) >= self.config.concurrent_reflector_uploads:
                    await asyncio.gather(*batch, loop=self.loop)
                    batch = []
            if batch:
                await asyncio.gather(*batch, loop=self.loop)
        await asyncio.sleep(300, loop=self.loop)
|
async def reflect_streams(self):
    """Background loop: every 5 minutes, re-upload local streams to a reflector.

    NOTE(review): unlike the dedup-aware variant, nothing here checks whether
    an upload for the same stream is already in flight, so a stream can be
    scheduled for upload twice — verify against how running uploads are
    tracked elsewhere.
    """
    while True:
        if self.config.reflect_streams and self.config.reflector_servers:
            sd_hashes = await self.storage.get_streams_to_re_reflect()
            sd_hashes = [sd for sd in sd_hashes if sd in self.streams]
            batch = []
            while sd_hashes:
                stream = self.streams[sd_hashes.pop()]
                if (
                    self.blob_manager.is_blob_verified(stream.sd_hash)
                    and stream.blobs_completed
                ):
                    if not stream.fully_reflected.is_set():
                        # pick a random reflector server per upload
                        host, port = random.choice(self.config.reflector_servers)
                        batch.append(stream.upload_to_reflector(host, port))
                    # flush once a full batch of uploads has been queued
                    if len(batch) >= self.config.concurrent_reflector_uploads:
                        await asyncio.gather(*batch, loop=self.loop)
                        batch = []
            if batch:
                await asyncio.gather(*batch, loop=self.loop)
        await asyncio.sleep(300, loop=self.loop)
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
def stop(self):
    """Cancel background work, stop every stream, and mark the manager stopped."""
    # cancel the long-lived background tasks if they are still alive
    for background in (self.resume_saving_task, self.re_reflect_task):
        if background and not background.done():
            background.cancel()
    # drain and stop every managed stream
    while self.streams:
        _, managed = self.streams.popitem()
        managed.stop_tasks()
    while self.update_stream_finished_futs:
        self.update_stream_finished_futs.pop().cancel()
    # running_reflector_uploads maps sd_hash -> upload task here
    while self.running_reflector_uploads:
        _, upload = self.running_reflector_uploads.popitem()
        upload.cancel()
    self.started.clear()
    log.info("finished stopping the stream manager")
|
def stop(self):
    """Cancel background work, stop every stream, and mark the manager stopped."""
    # cancel the long-lived background tasks if they are still alive
    for background in (self.resume_saving_task, self.re_reflect_task):
        if background and not background.done():
            background.cancel()
    # drain and stop every managed stream
    while self.streams:
        _, managed = self.streams.popitem()
        managed.stop_tasks()
    while self.update_stream_finished_futs:
        self.update_stream_finished_futs.pop().cancel()
    # running_reflector_uploads is a list of upload tasks here
    while self.running_reflector_uploads:
        self.running_reflector_uploads.pop().cancel()
    self.started.clear()
    log.info("finished stopping the stream manager")
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def create_stream(
    self,
    file_path: str,
    key: Optional[bytes] = None,
    iv_generator: Optional[typing.Generator[bytes, None, None]] = None,
) -> ManagedStream:
    """Create a managed stream from *file_path*, register it, and reflect it."""
    new_stream = await ManagedStream.create(
        self.loop, self.config, self.blob_manager, file_path, key, iv_generator
    )
    self.streams[new_stream.sd_hash] = new_stream
    # re-save the content claim whenever storage reports it changed
    self.storage.content_claim_callbacks[new_stream.stream_hash] = (
        lambda: self._update_content_claim(new_stream)
    )
    if self.config.reflect_streams and self.config.reflector_servers:
        self.reflect_stream(new_stream)
    return new_stream
|
async def create_stream(
    self,
    file_path: str,
    key: Optional[bytes] = None,
    iv_generator: Optional[typing.Generator[bytes, None, None]] = None,
) -> ManagedStream:
    """Create a managed stream from *file_path*, register it, and upload it."""
    new_stream = await ManagedStream.create(
        self.loop, self.config, self.blob_manager, file_path, key, iv_generator
    )
    self.streams[new_stream.sd_hash] = new_stream
    # re-save the content claim whenever storage reports it changed
    self.storage.content_claim_callbacks[new_stream.stream_hash] = (
        lambda: self._update_content_claim(new_stream)
    )
    if self.config.reflect_streams and self.config.reflector_servers:
        # pick a random reflector server and track the upload task
        host, port = random.choice(self.config.reflector_servers)
        upload = self.loop.create_task(new_stream.upload_to_reflector(host, port))
        self.running_reflector_uploads.append(upload)

        def _untrack(_):
            # drop the finished task from the tracking list, if still there
            if upload in self.running_reflector_uploads:
                self.running_reflector_uploads.remove(upload)

        upload.add_done_callback(_untrack)
    return new_stream
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def delete_stream(
    self, stream: ManagedStream, delete_file: Optional[bool] = False
):
    """Forget *stream*: cancel its upload, delete its blobs and db records.

    If *delete_file* is true and the decrypted output exists, remove it too.
    """
    # cancel an in-flight reflector upload for this stream, if any
    if stream.sd_hash in self.running_reflector_uploads:
        self.running_reflector_uploads[stream.sd_hash].cancel()
    stream.stop_tasks()
    self.streams.pop(stream.sd_hash, None)
    # all blob hashes except the final descriptor entry
    # (presumably the stream terminator — verify against descriptor format)
    doomed = [stream.sd_hash]
    doomed.extend(blob.blob_hash for blob in stream.descriptor.blobs[:-1])
    await self.blob_manager.delete_blobs(doomed, delete_from_db=False)
    await self.storage.delete_stream(stream.descriptor)
    if delete_file and stream.output_file_exists:
        os.remove(stream.full_path)
|
async def delete_stream(
    self, stream: ManagedStream, delete_file: Optional[bool] = False
):
    """Forget *stream*: delete its blobs and db records; optionally its file."""
    stream.stop_tasks()
    self.streams.pop(stream.sd_hash, None)
    # all blob hashes except the final descriptor entry
    # (presumably the stream terminator — verify against descriptor format)
    doomed = [stream.sd_hash]
    doomed.extend(blob.blob_hash for blob in stream.descriptor.blobs[:-1])
    await self.blob_manager.delete_blobs(doomed, delete_from_db=False)
    await self.storage.delete_stream(stream.descriptor)
    if delete_file and stream.output_file_exists:
        os.remove(stream.full_path)
|
https://github.com/lbryio/lbry-sdk/issues/2368
|
2019-08-01 18:18:06,916 INFO lbry.stream.reflector.client:119: Sent reflector blob e5828b7e
2019-08-01 18:18:29,221 INFO lbry.stream.reflector.client:119: Sent reflector blob 867d1a2c
2019-08-01 18:18:51,545 INFO lbry.stream.reflector.client:119: Sent reflector blob 6e365367
2019-08-01 18:18:58,400 INFO lbry.extras.daemon.Daemon:2330: publishing: name: xxxx params: {'title': 'xxxx', 'description': 'xxxxxx', 'bid': '0.10000000', 'languages': ['en'], 'tags': ['xxxxx'], 'thumbnail_url': 'https://spee.ch/8/xxxxxxxxx', 'license': 'All rights reserved Published With Permission', 'release_time': 1564696220, 'channel_id': 'xxxxxxxxxxxx', 'fee_currency': 'LBC', 'fee_amount': '1.00000000', 'file_path': 'C:\\Users\\David T\\Desktop\\GMCZ0549_VID_1280x720x1500kbs_HCVC549.mp4'}
2019-08-01 18:19:22,348 ERROR lbry.extras.daemon.Daemon:586: error handling api request
Traceback (most recent call last):
File "lbry\extras\daemon\Daemon.py", line 580, in _process_rpc_call
File "lbry\extras\daemon\Daemon.py", line 2342, in jsonrpc_publish
File "lbry\extras\daemon\Daemon.py", line 2659, in jsonrpc_stream_update
File "lbry\stream\stream_manager.py", line 232, in delete_stream
AttributeError: 'NoneType' object has no attribute 'stop_tasks'
2019-08-01 18:19:37,886 INFO lbry.stream.reflector.client:119: Sent reflector blob f5b10e7b
2019-08-01 18:20:00,717 INFO lbry.stream.reflector.client:119: Sent reflector blob d5ffe375
|
AttributeError
|
async def connect(cls, path: Union[bytes, str], *args, **kwargs):
    """Build a *cls* instance whose sqlite connection is opened on its executor."""
    instance = cls()

    def _open():
        # runs on the instance's executor thread so the (possibly slow)
        # database open never blocks the event loop
        return sqlite3.connect(path, *args, **kwargs)

    loop = asyncio.get_event_loop()
    instance.connection = await loop.run_in_executor(instance.executor, _open)
    return instance
|
async def connect(cls, path: Union[bytes, str], *args, **kwargs):
    """Build a *cls* instance; open its sqlite connection via the executor."""
    instance = cls()
    # submit the open to the executor thread and await the wrapped future
    pending = instance.executor.submit(sqlite3.connect, path, *args, **kwargs)
    instance.connection = await wrap_future(pending)
    return instance
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def executemany(self, sql: str, params: Iterable):
    """Execute *sql* once per parameter tuple, on the db thread.

    The fetchall() on the executemany cursor is needed to prevent
    SQLITE_MISUSE.
    """
    seq = [] if params is None else params
    return self.run(lambda conn: conn.executemany(sql, seq).fetchall())
|
def executemany(self, sql: str, params: Iterable):
    """Run executemany inside a db-thread transaction."""
    def _bulk_execute(conn: sqlite3.Connection, statement, seq):
        return conn.executemany(statement, seq)
    return self.run(_bulk_execute, sql, params)
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def executescript(self, script: str) -> Awaitable:
    """Execute a multi-statement SQL *script* on the db thread."""
    def _run_script(conn: sqlite3.Connection):
        return conn.executescript(script)
    return self.run(_run_script)
|
def executescript(self, script: str) -> Awaitable:
    """Submit a multi-statement SQL *script* to the executor thread."""
    pending = self.executor.submit(self.connection.executescript, script)
    return wrap_future(pending)
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def execute_fetchall(
    self, sql: str, parameters: Iterable = None
) -> Awaitable[Iterable[sqlite3.Row]]:
    """Execute *sql* on the db thread; awaitable resolves to all result rows."""
    bound = [] if parameters is None else parameters
    return self.run(lambda conn: conn.execute(sql, bound).fetchall())
|
def execute_fetchall(
    self, sql: str, parameters: Iterable = None
) -> Awaitable[Iterable[sqlite3.Row]]:
    """Run a query on the executor thread; awaitable resolves to all rows."""
    bound = [] if parameters is None else parameters

    def _query_and_fetch(conn: sqlite3.Connection, statement, values):
        return conn.execute(statement, values).fetchall()

    return wrap_future(
        self.executor.submit(_query_and_fetch, self.connection, sql, bound)
    )
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def execute(self, sql: str, parameters: Iterable = None) -> Awaitable[sqlite3.Cursor]:
    """Execute one statement on the db thread; awaitable resolves to its cursor."""
    bound = [] if parameters is None else parameters
    return self.run(lambda conn: conn.execute(sql, bound))
|
def execute(self, sql: str, parameters: Iterable = None) -> Awaitable[sqlite3.Cursor]:
    """Execute one statement inside a db-thread transaction."""
    bound = [] if parameters is None else parameters

    def _execute(conn, statement, values):
        return conn.execute(statement, values)

    return self.run(_execute, sql, bound)
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def run(self, fun, *args, **kwargs) -> Awaitable:
    """Schedule *fun* to run inside a transaction on the db executor thread."""
    def _transact():
        return self.__run_transaction(fun, *args, **kwargs)
    return asyncio.get_event_loop().run_in_executor(self.executor, _transact)
|
def run(self, fun, *args, **kwargs) -> Awaitable:
return wrap_future(
self.executor.submit(self.__run_transaction, fun, *args, **kwargs)
)
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def __run_transaction(
self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs
):
self.connection.execute("begin")
try:
result = fun(self.connection, *args, **kwargs) # type: ignore
self.connection.commit()
return result
except (Exception, OSError) as e:
log.exception("Error running transaction:", exc_info=e)
self.connection.rollback()
log.warning("rolled back")
raise
|
def __run_transaction(
self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs
):
self.connection.execute("begin")
try:
result = fun(self.connection, *args, **kwargs) # type: ignore
self.connection.commit()
return result
except (Exception, OSError): # as e:
# log.exception('Error running transaction:', exc_info=e)
self.connection.rollback()
raise
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def run_with_foreign_keys_disabled(self, fun, *args, **kwargs) -> Awaitable:
return asyncio.get_event_loop().run_in_executor(
self.executor,
self.__run_transaction_with_foreign_keys_disabled,
fun,
args,
kwargs,
)
|
def run_with_foreign_keys_disabled(self, fun, *args, **kwargs) -> Awaitable:
return wrap_future(
self.executor.submit(
self.__run_transaction_with_foreign_keys_disabled, fun, *args, **kwargs
)
)
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def __run_transaction_with_foreign_keys_disabled(
self, fun: Callable[[sqlite3.Connection, Any, Any], Any], args, kwargs
):
(foreign_keys_enabled,) = self.connection.execute("pragma foreign_keys").fetchone()
if not foreign_keys_enabled:
raise sqlite3.IntegrityError(
"foreign keys are disabled, use `AIOSQLite.run` instead"
)
try:
self.connection.execute("pragma foreign_keys=off")
return self.__run_transaction(fun, *args, **kwargs)
finally:
self.connection.execute("pragma foreign_keys=on")
|
def __run_transaction_with_foreign_keys_disabled(
self, fun: Callable[[sqlite3.Connection, Any, Any], Any], *args, **kwargs
):
(foreign_keys_enabled,) = self.connection.execute("pragma foreign_keys").fetchone()
if not foreign_keys_enabled:
raise sqlite3.IntegrityError(
"foreign keys are disabled, use `AIOSQLite.run` instead"
)
try:
self.connection.execute("pragma foreign_keys=off")
return self.__run_transaction(fun, *args, **kwargs)
finally:
self.connection.execute("pragma foreign_keys=on")
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
async def add_keys(self, account, chain, keys):
await self.db.executemany(
"insert into pubkey_address values (?, ?, ?, ?, ?, NULL, 0)",
(
(
pubkey.address,
account.public_key.address,
chain,
position,
sqlite3.Binary(pubkey.pubkey_bytes),
)
for position, pubkey in keys
),
)
|
async def add_keys(self, account, chain, keys):
await self.db.executemany(
"insert into pubkey_address (address, account, chain, position, pubkey) values (?, ?, ?, ?, ?)",
(
(
pubkey.address,
account.public_key.address,
chain,
position,
sqlite3.Binary(pubkey.pubkey_bytes),
)
for position, pubkey in keys
),
)
|
https://github.com/lbryio/lbry-sdk/issues/2164
|
2019-05-22 11:29:08,217 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bRFrdNffbqpmLnJDv9n3WPXHSQZx3jbQ9d', 'bCooREDfWchZp2W3J3sp4cpYFTwFgjs4HB', 'bJ4t5xDH3VaCn9tyjf2SGKr9E5NWsTVoZa', 'bR8NvBkV4FKzm3GsrLTLpEdajoPSin86Hg', 'bGuS42xgcRH6eVoXmFusiie2z85v4fbkm5', 'bWc6ZgefAwqvKddQGhbnuNuGe5Qdt3JyS5', 'bGVmKgRnPHq9XWthMoVkJYhP6fM3FuZjFC', 'bZKK6irm4fKg1RrsZA7vTBMuDk7JruSkCe', 'bEWAJXE7uYckDKTzkEREUfnC5Rw4y5y8fa', 'bKMV41fHzU5a3c32HJ42ZL39jXprz9muze', 'bRu9SZhSwrKiPhJEoDSXMsb7JyHmJRg13C', 'bH3J7FnNPtA8RC4CL7pLYZcRBuDE3dfzgk', 'bPwfPQTUTPncazeeJdrNcTYhjKkLQkRDNJ', 'bRaQP4JbgTDJbgvbJaYWNVHzpBoi3TDHGD', 'bWt4UvNRbUqwbJtW5gUabEj3dG2uaRVJzD', 'bUKFnd7uSnFyQbvqvpdK3MGvRA8HccuqYt', 'bEsC4CeSCP5sRsnMgt8G7L2HewrwRzg3en', 'baASTNc6t6V82wNrgSwvwuYK3s6Uga93w7', 'bL5vFyUvzazUkWdgqG1ZRQksW1zBP6KWb5', 'bUqVumd8AYbCNDkvSTPbcN24K7Lk5SnFkm']
2019-05-22 11:29:08,229 INFO torba.client.baseledger:125: (lbc_mainnet) on_address: ['bLrEADwdXaTuzqJp74JaQ2oKd7TabEAuEX', 'bCvuQRyHLBS1RsQUzYRYvu4FQsVLkB8CqG', 'bRmhdKXXaWJVQP9rc4g6cXzzyoJM5qsDMg', 'bTz5yM5yn19gMTQoJJyUBgubJ34nkuViqh', 'bQ5mfFViSU8b69uNcESfteoF1pAoC2XNrs', 'bCvjVRgnKm28CBLZNDGWHGos2Dv5QL4byA', 'bFLvefdk1Da2GB5oxVZXsK4PZ8Bs24Ghbf', 'bMtWobUFJMutGJ6o5UHN4EAHtK3NnRoxu5', 'bD45BgoG6DxywWzZKY6fbf787M1v2ZgVgZ', 'bEcykVNdYmK9M5JNaCPgfcr1qeK78jGYGf', 'bJZa3GrhWs4u7NZyHgZutaPybQGGeMy1cY', 'bap8B9oci9y1uCPQYbJaguE6ty3N8UnBvM', 'bH7jrnURyV4Azmit8LcfdMgQST5sdhTKx8', 'bNR2EWJGXbU24zxsu1wAw139vwJvmcVizs', 'bM6j8GyvRxZ6xL1kwnKm6TXwYeU3D8CYGS', 'bHcemnXEnisLAouvk3amqXx8X1BfJrTThT', 'bKxxwYWqjsjX9yYwgygWzNGkMYD48UbuYm', 'bP8yrGLewLZKJ8T9EaPXVYWJdNY5Po9V8S', 'bMYhng5tGaa88X62DfnWynmSdM6aPVQamF', 'bHQrTNGLHkMrwZETJaFcD2RwLKdkoQV1oR']
Task exception was never retrieved
future: <Task finished coro=<BaseLedger.subscribe_account() done, defined at c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py:344> exception=InterfaceError('Error binding parameter 0 - probably unsupported type.')>
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\torba\torba\client\baseledger.py", line 347, in subscribe_account
await account.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 375, in ensure_address_gap
new_addresses = await address_manager.ensure_address_gap()
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 139, in ensure_address_gap
new_keys = await self._generate_keys(start, end-1)
File "c:\users\thoma\documents\lbry\torba\torba\client\baseaccount.py", line 147, in _generate_keys
await self.account.ledger.db.add_keys(self.account, self.chain_number, keys)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 516, in add_keys
for position, pubkey in keys)
File "C:\Users\thoma\AppData\Local\Programs\Python\Python37-32\lib\concurrent\futures\thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 61, in __run_transaction
result = fun(self.connection, *args, **kwargs) # type: ignore
File "c:\users\thoma\documents\lbry\torba\torba\client\basedatabase.py", line 39, in __executemany_in_a_transaction
return conn.executemany(*args, **kwargs)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
2019-05-22 11:29:08,305 INFO torba.client.baseledger:118: (lbc_mainnet) on_transacti
|
sqlite3.InterfaceError
|
def is_comment_signed_by_channel(comment: dict, channel: Output):
if type(channel) is Output:
try:
pieces = [
comment["signing_ts"].encode(),
channel.claim_hash,
comment["comment"].encode(),
]
return Output.is_signature_valid(
get_encoded_signature(comment["signature"]),
sha256(b"".join(pieces)),
channel.claim.channel.public_key_bytes,
)
except KeyError:
pass
return False
|
def is_comment_signed_by_channel(comment: dict, channel: Output):
try:
pieces = [
comment["signing_ts"].encode(),
channel.claim_hash,
comment["comment"].encode(),
]
return Output.is_signature_valid(
get_encoded_signature(comment["signature"]),
sha256(b"".join(pieces)),
channel.claim.channel.public_key_bytes,
)
except KeyError:
pass
return False
|
https://github.com/lbryio/lbry-sdk/issues/2279
|
(lbry-venv) C:\Users\thoma\Documents\lbry>lbrynet comment list --claim_id=20f54f401de44b9b5ba866e60286144995824468{
"code": -32500,
"data": [
"Traceback (most recent call last):",
" File \"c:\\users\\thoma\\documents\\lbry\\lbry\\lbry\\extras\\daemon\\Daemon.py\", line 579, in _process_rpc_call",
" result = await result",
" File \"c:\\users\\thoma\\documents\\lbry\\lbry\\lbry\\extras\\daemon\\Daemon.py\", line 3429, in jsonrpc_comment_list",
" cmnt['is_channel_signature_valid'] = is_comment_signed_by_channel(cmnt, channel)",
" File \"c:\\users\\thoma\\documents\\lbry\\lbry\\lbry\\extras\\daemon\\comment_client.py\", line 25, in is_comment_signed_by_channel",
" channel.claim_hash,",
"AttributeError: 'dict' object has no attribute 'claim_hash'",
""
],
"message": "'dict' object has no attribute 'claim_hash'"```
|
AttributeError
|
async def _inner(*args, **kwargs):
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
if key in lru_cache:
return lru_cache.get(key)
concurrent_cache[key] = concurrent_cache.get(key) or asyncio.create_task(
async_fn(*args, **kwargs)
)
try:
result = await concurrent_cache[key]
lru_cache.set(key, result)
return result
finally:
concurrent_cache.pop(key, None)
|
async def _inner(*args, **kwargs) -> typing.Any:
loop = asyncio.get_running_loop()
now = loop.time()
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
if key in cache and (now - cache[key][1] < duration):
return cache[key][0]
to_cache = await fn(*args, **kwargs)
cache[key] = to_cache, now
return to_cache
|
https://github.com/lbryio/lbry-sdk/issues/2137
|
2019-05-16 15:14:35,679 INFO lbrynet.dht.blob_announcer:51: announced 5 blobs
2019-05-16 15:15:04,419 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568534, change=1) header blocks, final height 568534
2019-05-16 15:15:21,629 INFO lbrynet.blob_exchange.client:137: downloaded 4af012f8 from 18.213.255.15:5567
2019-05-16 15:15:21,652 INFO lbrynet.stream.managed_stream:358: write blob 8/76
2019-05-16 15:15:28,815 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568535, change=1) header blocks, final height 568535
2019-05-16 15:16:24,383 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:24,391 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: Failed to resolve "mp3" within the timeout
2019-05-16 15:16:31,259 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:31,267 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: 'str' object has no attribute 'get'
2019-05-16 15:16:39,715 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:39,728 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: 'str' object has no attribute 'get'
2019-05-16 15:18:36,193 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568536, change=1) header blocks, final height 568536
|
concurrent.futures._base.CancelledError
|
def wrapper(async_fn):
@functools.wraps(async_fn)
async def _inner(*args, **kwargs):
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
if key in lru_cache:
return lru_cache.get(key)
concurrent_cache[key] = concurrent_cache.get(key) or asyncio.create_task(
async_fn(*args, **kwargs)
)
try:
result = await concurrent_cache[key]
lru_cache.set(key, result)
return result
finally:
concurrent_cache.pop(key, None)
return _inner
|
async def wrapper(*args, **kwargs):
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
cache[key] = cache.get(key) or asyncio.create_task(async_fn(*args, **kwargs))
try:
return await cache[key]
finally:
cache.pop(key, None)
|
https://github.com/lbryio/lbry-sdk/issues/2137
|
2019-05-16 15:14:35,679 INFO lbrynet.dht.blob_announcer:51: announced 5 blobs
2019-05-16 15:15:04,419 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568534, change=1) header blocks, final height 568534
2019-05-16 15:15:21,629 INFO lbrynet.blob_exchange.client:137: downloaded 4af012f8 from 18.213.255.15:5567
2019-05-16 15:15:21,652 INFO lbrynet.stream.managed_stream:358: write blob 8/76
2019-05-16 15:15:28,815 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568535, change=1) header blocks, final height 568535
2019-05-16 15:16:24,383 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:24,391 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: Failed to resolve "mp3" within the timeout
2019-05-16 15:16:31,259 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:31,267 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: 'str' object has no attribute 'get'
2019-05-16 15:16:39,715 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:39,728 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: 'str' object has no attribute 'get'
2019-05-16 15:18:36,193 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568536, change=1) header blocks, final height 568536
|
concurrent.futures._base.CancelledError
|
async def _fetch_tx(self, txid):
return self.transaction_class(unhexlify(await self.network.get_transaction(txid)))
|
def _fetch_tx(self, txid):
async def __fetch_parse(txid):
return self.transaction_class(
unhexlify(await self.network.get_transaction(txid))
)
return asyncio.ensure_future(__fetch_parse(txid))
|
https://github.com/lbryio/lbry-sdk/issues/2137
|
2019-05-16 15:14:35,679 INFO lbrynet.dht.blob_announcer:51: announced 5 blobs
2019-05-16 15:15:04,419 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568534, change=1) header blocks, final height 568534
2019-05-16 15:15:21,629 INFO lbrynet.blob_exchange.client:137: downloaded 4af012f8 from 18.213.255.15:5567
2019-05-16 15:15:21,652 INFO lbrynet.stream.managed_stream:358: write blob 8/76
2019-05-16 15:15:28,815 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568535, change=1) header blocks, final height 568535
2019-05-16 15:16:24,383 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:24,391 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: Failed to resolve "mp3" within the timeout
2019-05-16 15:16:31,259 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:31,267 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: 'str' object has no attribute 'get'
2019-05-16 15:16:39,715 ERROR lbrynet.wallet.resolve:42:
Traceback (most recent call last):
File "lbrynet/wallet/resolve.py", line 38, in resolve
File "lbrynet/wallet/resolve.py", line 67, in _handle_resolutions
File "lbrynet/wallet/resolve.py", line 109, in _handle_resolve_uri_response
File "lbrynet/wallet/resolve.py", line 172, in parse_and_validate_claim_result
concurrent.futures._base.CancelledError
2019-05-16 15:16:39,728 WARNING lbrynet.extras.daemon.Daemon:914: Error downloading mp3: 'str' object has no attribute 'get'
2019-05-16 15:18:36,193 INFO torba.client.baseledger:133: lbc_mainnet: added BlockHeightEvent(height=568536, change=1) header blocks, final height 568536
|
concurrent.futures._base.CancelledError
|
async def save_max_gap(self):
if issubclass(self.address_generator, HierarchicalDeterministic):
gap = await self.get_max_gap()
self.receiving.gap = max(20, gap["max_receiving_gap"] + 1)
self.change.gap = max(6, gap["max_change_gap"] + 1)
self.wallet.save()
|
async def save_max_gap(self):
gap = await self.get_max_gap()
self.receiving.gap = max(20, gap["max_receiving_gap"] + 1)
self.change.gap = max(6, gap["max_change_gap"] + 1)
self.wallet.save()
|
https://github.com/lbryio/lbry-sdk/issues/2078
|
Traceback (most recent call last):
File "c:\users\thoma\documents\lbry\lbrynet\extras\daemon\Component.py", line 55, in _setup
result = await self.start()
File "c:\users\thoma\documents\lbry\lbrynet\extras\daemon\Components.py", line 268, in start
await self.wallet_manager.start()
File "c:\users\thoma\documents\lbry\lbry-venv\lib\site-packages\torba\client\basemanager.py", line 73, in start
l.start() for l in self.ledgers.values()
File "c:\users\thoma\documents\lbry\lbrynet\wallet\ledger.py", line 69, in start
await asyncio.gather(*(a.save_max_gap() for a in self.accounts))
File "c:\users\thoma\documents\lbry\lbrynet\wallet\account.py", line 171, in save_max_gap
self.receiving.gap = max(20, gap['max_receiving_gap'] + 1)
AttributeError: 'SingleKey' object has no attribute 'gap'
|
AttributeError
|
async def _announce(self, batch_size: typing.Optional[int] = 10):
    """Announce queued blob hashes to the DHT, ``batch_size`` at a time.

    Waits for the node to join the network, pulls pending blob hashes
    from storage into the announce queue, announces them in batches,
    records the announce time back to storage, and finally reschedules
    itself to run again in 60 seconds while the announcer is running.
    """
    # A falsy batch_size (0 or None) would leave the inner batching loop
    # unable to consume the queue, spinning forever — refuse it up front.
    if not batch_size:
        return
    # Announcing is pointless before the DHT join completes; wait for it.
    if not self.node.joined.is_set():
        await self.node.joined.wait()
    blob_hashes = await self.storage.get_blobs_to_announce()
    if blob_hashes:
        self.announce_queue.extend(blob_hashes)
        log.info("%i blobs to announce", len(blob_hashes))
    batch = []
    while len(self.announce_queue):
        cnt = 0
        announced = []
        # Drain up to batch_size hashes, starting an announce for each.
        while self.announce_queue and cnt < batch_size:
            blob_hash = self.announce_queue.pop()
            announced.append(blob_hash)
            batch.append(self.node.announce_blob(blob_hash))
            cnt += 1
        to_await = []
        while batch:
            to_await.append(batch.pop())
        if to_await:
            await asyncio.gather(*tuple(to_await), loop=self.loop)
            # Persist the announce timestamps so these blobs are not
            # immediately re-announced on the next pass.
            await self.storage.update_last_announced_blobs(announced, self.loop.time())
            log.info("announced %i blobs", len(announced))
    if self.running:
        # Schedule the next announce pass in one minute.
        self.pending_call = self.loop.call_later(60, self.announce, batch_size)
|
async def _announce(self, batch_size: typing.Optional[int] = 10):
if not self.node.joined.is_set():
await self.node.joined.wait()
blob_hashes = await self.storage.get_blobs_to_announce()
if blob_hashes:
self.announce_queue.extend(blob_hashes)
log.info("%i blobs to announce", len(blob_hashes))
batch = []
while len(self.announce_queue):
cnt = 0
announced = []
while self.announce_queue and cnt < batch_size:
blob_hash = self.announce_queue.pop()
announced.append(blob_hash)
batch.append(self.node.announce_blob(blob_hash))
cnt += 1
to_await = []
while batch:
to_await.append(batch.pop())
if to_await:
await asyncio.gather(*tuple(to_await), loop=self.loop)
await self.storage.update_last_announced_blobs(announced, self.loop.time())
log.info("announced %i blobs", len(announced))
if self.running:
self.pending_call = self.loop.call_later(60, self.announce, batch_size)
|
https://github.com/lbryio/lbry-sdk/issues/1297
|
2018-07-10 15:35:31,165 INFO lbrynet.daemon.Daemon:242: Starting lbrynet-daemon
2018-07-10 15:35:31,187 INFO lbrynet.daemon.ExchangeRateManager:217: Starting exchange rate manager
2018-07-10 15:35:31,313 INFO lbrynet.daemon.Daemon:284: Platform: {"lbrynet_version": "0.20.3", "platform": "Windows-10-10.0.15063", "os_system": "Windows", "python_version": "2.7.15", "os_release": "10", "lbryum_version": "3.2.3", "ip": "47.17.162.103", "lbryschema_version": "0.0.16", "processor": "Intel64 Family 6 Model 60 Stepping 3, GenuineIntel", "build": "release"}
2018-07-10 15:35:31,315 INFO lbrynet.daemon.Daemon:487: Loading databases
2018-07-10 15:35:31,319 INFO lbrynet.daemon.Daemon:542: Using lbryum wallet
2018-07-10 15:35:31,334 WARNING lbrynet.core.Session:210: UPnP failed. Reason: ConflictInMappingEntry
2018-07-10 15:35:31,335 ERROR lbrynet.daemon.DaemonControl:118: Failed to start lbrynet-daemon
Traceback (most recent call last):
File "site-packages\lbrynet\daemon\DaemonControl.py", line 115, in start_server_and_listen
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\daemon\DaemonServer.py", line 52, in start
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\daemon\Daemon.py", line 252, in setup
File "site-packages\twisted\internet\defer.py", line 651, in _runCallbacks
File "site-packages\lbrynet\core\Session.py", line 135, in <lambda>
File "site-packages\lbrynet\core\Session.py", line 227, in _setup_dht
File "site-packages\lbrynet\dht\hashannouncer.py", line 21, in __init__
File "site-packages\twisted\internet\defer.py", line 1576, in __init__
ValueError: DeferredSemaphore requires tokens >= 1
2018-07-10 15:35:31,358 INFO lbrynet.daemon.DaemonServer:56: Stopping the reactor
2018-07-10 15:35:31,358 INFO lbrynet.daemon.Daemon:420: Closing lbrynet session
2018-07-10 15:35:31,358 INFO lbrynet.daemon.Daemon:421: Status at time of shutdown: loading_wallet
2018-07-10 15:35:31,359 INFO lbrynet.core.Session:141: Stopping session.
2018-07-10 15:35:31,359 INFO lbrynet.core.Wallet:208: Stopping wallet.
|
ValueError
|
def start(self):
    """Start the DHT component: resolve ports and external IP, then join.

    NOTE(review): this is a Twisted inlineCallbacks-style generator (it
    yields a Deferred) — confirm the decorator at the definition site.
    """
    self.upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
    # Prefer UPnP-mapped external ports; fall back to configured ports.
    self.external_peer_port = self.upnp_component.upnp_redirects.get(
        "TCP", GCS("peer_port")
    )
    self.external_udp_port = self.upnp_component.upnp_redirects.get(
        "UDP", GCS("dht_node_port")
    )
    node_id = CS.get_node_id()
    if node_id is None:
        node_id = generate_id()
    external_ip = self.upnp_component.external_ip
    if not external_ip:
        # UPnP could not determine the address; try the settings fallback
        # so the node is never constructed with externalIP=None.
        log.warning("UPnP component failed to get external ip")
        external_ip = CS.get_external_ip()
    if not external_ip:
        log.warning("failed to get external ip")
    self.dht_node = node.Node(
        node_id=node_id,
        udpPort=GCS("dht_node_port"),
        externalUDPPort=self.external_udp_port,
        externalIP=external_ip,
        peerPort=self.external_peer_port,
    )
    self.dht_node.start_listening()
    # Wait until the UDP listener is actually up before joining.
    yield self.dht_node._protocol._listening
    d = self.dht_node.joinNetwork(GCS("known_dht_nodes"))
    d.addCallback(lambda _: self.dht_node.start_looping_calls())
    d.addCallback(lambda _: log.info("Joined the dht"))
    log.info("Started the dht")
|
def start(self):
    """Start the DHT component: resolve ports and external IP, then join.

    Fix: the UPnP component's ``external_ip`` may be None when UPnP
    discovery failed; passing that straight into ``node.Node`` later
    caused "Exception: Cannot determine external IP: None" during blob
    announces. Fall back to the settings-based IP lookup before building
    the node.
    """
    self.upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
    # Prefer UPnP-mapped external ports; fall back to configured ports.
    self.external_peer_port = self.upnp_component.upnp_redirects.get(
        "TCP", GCS("peer_port")
    )
    self.external_udp_port = self.upnp_component.upnp_redirects.get(
        "UDP", GCS("dht_node_port")
    )
    node_id = CS.get_node_id()
    if node_id is None:
        node_id = generate_id()
    external_ip = self.upnp_component.external_ip
    if not external_ip:
        log.warning("UPnP component failed to get external ip")
        external_ip = CS.get_external_ip()
    if not external_ip:
        log.warning("failed to get external ip")
    self.dht_node = node.Node(
        node_id=node_id,
        udpPort=GCS("dht_node_port"),
        externalUDPPort=self.external_udp_port,
        externalIP=external_ip,
        peerPort=self.external_peer_port,
    )
    self.dht_node.start_listening()
    # Wait until the UDP listener is actually up before joining.
    yield self.dht_node._protocol._listening
    d = self.dht_node.joinNetwork(GCS("known_dht_nodes"))
    d.addCallback(lambda _: self.dht_node.start_looping_calls())
    d.addCallback(lambda _: log.info("Joined the dht"))
    log.info("Started the dht")
|
https://github.com/lbryio/lbry-sdk/issues/1561
|
2018-10-28 21:39:16,952 WARNING lbrynet.daemon.Components:712: upnp discovery failed: {http://schemas.microsoft.com/windows/pnpx/2005/11}X_hardwareId
2018-10-28 21:39:16,953 ERROR lbrynet.daemon.Components:782: failed to setup upnp
Unhandled error in Deferred:
2018-10-28 21:40:32,480 CRITICAL twisted:154: Unhandled error in Deferred:
2018-10-28 21:40:32,481 CRITICAL twisted:154:
Traceback (most recent call last):
File "twisted\internet\defer.py", line 1418, in _inlineCallbacks
File "lbrynet\dht\node.py", line 635, in _iterativeFind
File "twisted\internet\defer.py", line 1362, in returnValue
twisted.internet.defer._DefGen_Return: [<lbrynet.dht.contact._Contact object at 0x067573D0>, <lbrynet.dht.contact._Contact object at 0x06796470>, <lbrynet.dht.contact._Contact object at 0x064A5B30>, <lbrynet.dht.contact._Contact object at 0x067798B0>, <lbrynet.dht.contact._Contact object at 0x06793DB0>, <lbrynet.dht.contact._Contact object at 0x0647D510>, <lbrynet.dht.contact._Contact object at 0x06779890>, <lbrynet.dht.contact._Contact object at 0x06488CF0>]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "twisted\internet\defer.py", line 1416, in _inlineCallbacks
File "twisted\python\failure.py", line 491, in throwExceptionIntoGenerator
File "lbrynet\dht\hashannouncer.py", line 38, in do_store
File "twisted\internet\defer.py", line 1418, in _inlineCallbacks
File "lbrynet\dht\node.py", line 326, in announceHaveBlob
Exception: Cannot determine external IP: None
Unhandled error in Deferred:
|
Exception
|
def _maintain_redirects(self):
    """Discover the UPnP gateway, resolve the external IP, and keep the
    DHT/peer port redirects alive.

    Runs periodically; tolerates discovery failure by falling back to
    the lbry.io IP lookup, and re-creates any mapping the gateway has
    dropped. NOTE(review): inlineCallbacks-style generator — confirm
    the decorator at the definition site.
    """
    # setup the gateway if necessary
    if not self.upnp:
        try:
            self.upnp = yield from_future(UPnP.discover())
            log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
        except Exception as err:
            # Discovery is best-effort; record the failure and continue
            # so the external-IP fallback below still runs.
            log.warning("upnp discovery failed: %s", err)
            self.upnp = None
    # update the external ip
    external_ip = None
    if self.upnp:
        try:
            external_ip = yield from_future(self.upnp.get_external_ip())
            if external_ip != "0.0.0.0":
                log.info("got external ip from UPnP: %s", external_ip)
        except (asyncio.TimeoutError, UPnPError):
            pass
    if external_ip == "0.0.0.0" or not external_ip:
        # Gateway unknown or returned the null address — use the
        # HTTP-based lookup instead.
        log.warning("unable to get external ip from UPnP, checking lbry.io fallback")
        external_ip = CS.get_external_ip()
    if self.external_ip and self.external_ip != external_ip:
        log.info("external ip changed from %s to %s", self.external_ip, external_ip)
    self.external_ip = external_ip
    assert self.external_ip is not None  # TODO: handle going/starting offline
    if not self.upnp_redirects and self.upnp:  # setup missing redirects
        try:
            log.info("add UPnP port mappings")
            upnp_redirects = yield DeferredDict(
                {
                    "UDP": from_future(
                        self.upnp.get_next_mapping(
                            self._int_dht_node_port, "UDP", "LBRY DHT port"
                        )
                    ),
                    "TCP": from_future(
                        self.upnp.get_next_mapping(
                            self._int_peer_port, "TCP", "LBRY peer port"
                        )
                    ),
                }
            )
            log.info("set up redirects: %s", upnp_redirects)
            self.upnp_redirects.update(upnp_redirects)
        except (asyncio.TimeoutError, UPnPError):
            # Gateway misbehaved mid-setup: forget it and retry from
            # discovery on the next (recursive) pass.
            self.upnp = None
            return self._maintain_redirects()
    elif self.upnp:  # check existing redirects are still active
        found = set()
        mappings = yield from_future(self.upnp.get_redirects())
        for mapping in mappings:
            proto = mapping["NewProtocol"]
            if (
                proto in self.upnp_redirects
                and mapping["NewExternalPort"] == self.upnp_redirects[proto]
            ):
                if mapping["NewInternalClient"] == self.upnp.lan_address:
                    found.add(proto)
        # Re-create any mapping the gateway no longer reports.
        if "UDP" not in found:
            try:
                udp_port = yield from_future(
                    self.upnp.get_next_mapping(
                        self._int_dht_node_port, "UDP", "LBRY DHT port"
                    )
                )
                self.upnp_redirects["UDP"] = udp_port
                log.info("refreshed upnp redirect for dht port: %i", udp_port)
            except (asyncio.TimeoutError, UPnPError):
                del self.upnp_redirects["UDP"]
        if "TCP" not in found:
            try:
                tcp_port = yield from_future(
                    self.upnp.get_next_mapping(
                        self._int_peer_port, "TCP", "LBRY peer port"
                    )
                )
                self.upnp_redirects["TCP"] = tcp_port
                log.info("refreshed upnp redirect for peer port: %i", tcp_port)
            except (asyncio.TimeoutError, UPnPError):
                del self.upnp_redirects["TCP"]
        if "TCP" in self.upnp_redirects and "UDP" in self.upnp_redirects:
            log.debug("upnp redirects are still active")
|
def _maintain_redirects(self):
    """Discover the UPnP gateway, resolve the external IP, and keep the
    DHT/peer port redirects alive.

    Fix (issue #1561): on discovery failure the old code returned
    immediately, so ``self.external_ip`` was never populated and the DHT
    later failed with "Cannot determine external IP: None". Discovery
    failure now falls through to the lbry.io IP fallback, and the
    redirect-setup branch is guarded on ``self.upnp`` actually existing.
    """
    # setup the gateway if necessary
    if not self.upnp:
        try:
            self.upnp = yield from_future(UPnP.discover())
            log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
        except Exception as err:
            # Best-effort: continue without a gateway so the external-IP
            # fallback below still runs.
            log.warning("upnp discovery failed: %s", err)
            self.upnp = None
    # update the external ip
    external_ip = None
    if self.upnp:
        try:
            external_ip = yield from_future(self.upnp.get_external_ip())
            if external_ip != "0.0.0.0":
                log.info("got external ip from UPnP: %s", external_ip)
        except (asyncio.TimeoutError, UPnPError):
            pass
    if external_ip == "0.0.0.0" or not external_ip:
        log.warning("unable to get external ip from UPnP, checking lbry.io fallback")
        external_ip = CS.get_external_ip()
    if self.external_ip and self.external_ip != external_ip:
        log.info("external ip changed from %s to %s", self.external_ip, external_ip)
    self.external_ip = external_ip
    assert self.external_ip is not None  # TODO: handle going/starting offline
    if not self.upnp_redirects and self.upnp:  # setup missing redirects
        try:
            log.info("add UPnP port mappings")
            upnp_redirects = yield DeferredDict(
                {
                    "UDP": from_future(
                        self.upnp.get_next_mapping(
                            self._int_dht_node_port, "UDP", "LBRY DHT port"
                        )
                    ),
                    "TCP": from_future(
                        self.upnp.get_next_mapping(
                            self._int_peer_port, "TCP", "LBRY peer port"
                        )
                    ),
                }
            )
            log.info("set up redirects: %s", upnp_redirects)
            self.upnp_redirects.update(upnp_redirects)
        except (asyncio.TimeoutError, UPnPError):
            # Gateway misbehaved mid-setup: forget it and retry from
            # discovery on the next (recursive) pass.
            self.upnp = None
            return self._maintain_redirects()
    elif self.upnp:  # check existing redirects are still active
        found = set()
        mappings = yield from_future(self.upnp.get_redirects())
        for mapping in mappings:
            proto = mapping["NewProtocol"]
            if (
                proto in self.upnp_redirects
                and mapping["NewExternalPort"] == self.upnp_redirects[proto]
            ):
                if mapping["NewInternalClient"] == self.upnp.lan_address:
                    found.add(proto)
        # Re-create any mapping the gateway no longer reports.
        if "UDP" not in found:
            try:
                udp_port = yield from_future(
                    self.upnp.get_next_mapping(
                        self._int_dht_node_port, "UDP", "LBRY DHT port"
                    )
                )
                self.upnp_redirects["UDP"] = udp_port
                log.info("refreshed upnp redirect for dht port: %i", udp_port)
            except (asyncio.TimeoutError, UPnPError):
                del self.upnp_redirects["UDP"]
        if "TCP" not in found:
            try:
                tcp_port = yield from_future(
                    self.upnp.get_next_mapping(
                        self._int_peer_port, "TCP", "LBRY peer port"
                    )
                )
                self.upnp_redirects["TCP"] = tcp_port
                log.info("refreshed upnp redirect for peer port: %i", tcp_port)
            except (asyncio.TimeoutError, UPnPError):
                del self.upnp_redirects["TCP"]
        if "TCP" in self.upnp_redirects and "UDP" in self.upnp_redirects:
            log.debug("upnp redirects are still active")
|
https://github.com/lbryio/lbry-sdk/issues/1561
|
2018-10-28 21:39:16,952 WARNING lbrynet.daemon.Components:712: upnp discovery failed: {http://schemas.microsoft.com/windows/pnpx/2005/11}X_hardwareId
2018-10-28 21:39:16,953 ERROR lbrynet.daemon.Components:782: failed to setup upnp
Unhandled error in Deferred:
2018-10-28 21:40:32,480 CRITICAL twisted:154: Unhandled error in Deferred:
2018-10-28 21:40:32,481 CRITICAL twisted:154:
Traceback (most recent call last):
File "twisted\internet\defer.py", line 1418, in _inlineCallbacks
File "lbrynet\dht\node.py", line 635, in _iterativeFind
File "twisted\internet\defer.py", line 1362, in returnValue
twisted.internet.defer._DefGen_Return: [<lbrynet.dht.contact._Contact object at 0x067573D0>, <lbrynet.dht.contact._Contact object at 0x06796470>, <lbrynet.dht.contact._Contact object at 0x064A5B30>, <lbrynet.dht.contact._Contact object at 0x067798B0>, <lbrynet.dht.contact._Contact object at 0x06793DB0>, <lbrynet.dht.contact._Contact object at 0x0647D510>, <lbrynet.dht.contact._Contact object at 0x06779890>, <lbrynet.dht.contact._Contact object at 0x06488CF0>]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "twisted\internet\defer.py", line 1416, in _inlineCallbacks
File "twisted\python\failure.py", line 491, in throwExceptionIntoGenerator
File "lbrynet\dht\hashannouncer.py", line 38, in do_store
File "twisted\internet\defer.py", line 1418, in _inlineCallbacks
File "lbrynet\dht\node.py", line 326, in announceHaveBlob
Exception: Cannot determine external IP: None
Unhandled error in Deferred:
|
Exception
|
def start(self):
    """Detect the external IP and establish UPnP port redirects.

    When UPnP is disabled, only the fallback IP lookup runs. Otherwise a
    first redirect-maintenance pass is performed, its outcome is reported
    to analytics, and the periodic maintenance loop is started.
    """
    log.info("detecting external ip")
    if not self.use_upnp:
        # UPnP disabled: just look up the external address and stop here.
        self.external_ip = CS.get_external_ip()
        return
    success = False
    yield self._maintain_redirects()
    if not self.upnp:
        log.error("failed to setup upnp")
    elif not self.upnp_redirects:
        log.error(
            "failed to setup upnp, debugging infomation: %s",
            self.upnp.zipped_debugging_info,
        )
    else:
        success = True
        log.debug(
            "set up upnp port redirects for gateway: %s",
            self.upnp.gateway.manufacturer_string,
        )
    self.component_manager.analytics_manager.send_upnp_setup_success_fail(
        success, self.get_status()
    )
    # Re-check the redirects every six minutes.
    self._maintain_redirects_lc.start(360, now=False)
|
def start(self):
    """Resolve the external IP and establish UPnP redirects on startup.

    With UPnP disabled only the fallback lookup runs; otherwise one
    redirect-maintenance pass is made, the result is reported to
    analytics, and the periodic maintenance loop begins.
    """
    if not self.use_upnp:
        # UPnP disabled: just look up the external address and stop here.
        self.external_ip = CS.get_external_ip()
        return
    success = False
    yield self._maintain_redirects()
    if self.upnp and self.upnp_redirects:
        success = True
        log.debug(
            "set up upnp port redirects for gateway: %s",
            self.upnp.gateway.manufacturer_string,
        )
    elif self.upnp:
        log.error(
            "failed to setup upnp, debugging infomation: %s",
            self.upnp.zipped_debugging_info,
        )
    else:
        log.error("failed to setup upnp")
    self.component_manager.analytics_manager.send_upnp_setup_success_fail(
        success, self.get_status()
    )
    # Re-check the redirects every six minutes.
    self._maintain_redirects_lc.start(360, now=False)
|
https://github.com/lbryio/lbry-sdk/issues/1561
|
2018-10-28 21:39:16,952 WARNING lbrynet.daemon.Components:712: upnp discovery failed: {http://schemas.microsoft.com/windows/pnpx/2005/11}X_hardwareId
2018-10-28 21:39:16,953 ERROR lbrynet.daemon.Components:782: failed to setup upnp
Unhandled error in Deferred:
2018-10-28 21:40:32,480 CRITICAL twisted:154: Unhandled error in Deferred:
2018-10-28 21:40:32,481 CRITICAL twisted:154:
Traceback (most recent call last):
File "twisted\internet\defer.py", line 1418, in _inlineCallbacks
File "lbrynet\dht\node.py", line 635, in _iterativeFind
File "twisted\internet\defer.py", line 1362, in returnValue
twisted.internet.defer._DefGen_Return: [<lbrynet.dht.contact._Contact object at 0x067573D0>, <lbrynet.dht.contact._Contact object at 0x06796470>, <lbrynet.dht.contact._Contact object at 0x064A5B30>, <lbrynet.dht.contact._Contact object at 0x067798B0>, <lbrynet.dht.contact._Contact object at 0x06793DB0>, <lbrynet.dht.contact._Contact object at 0x0647D510>, <lbrynet.dht.contact._Contact object at 0x06779890>, <lbrynet.dht.contact._Contact object at 0x06488CF0>]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "twisted\internet\defer.py", line 1416, in _inlineCallbacks
File "twisted\python\failure.py", line 491, in throwExceptionIntoGenerator
File "lbrynet\dht\hashannouncer.py", line 38, in do_store
File "twisted\internet\defer.py", line 1418, in _inlineCallbacks
File "lbrynet\dht\node.py", line 326, in announceHaveBlob
Exception: Cannot determine external IP: None
Unhandled error in Deferred:
|
Exception
|
def _batched_select(transaction, query, parameters):
for start_index in range(0, len(parameters), 900):
current_batch = parameters[start_index : start_index + 900]
bind = "({})".format(",".join(["?"] * len(current_batch)))
for result in transaction.execute(query.format(bind), current_batch):
yield result
|
def _batched_select(transaction, query, parameters):
for start_index in range(0, len(parameters), 900):
current_batch = parameters[start_index : start_index + 900]
bind = "({})".format(",".join("?" for _ in range(len(current_batch))))
for result in transaction.execute(query.format(bind), current_batch):
yield result
|
https://github.com/lbryio/lbry-sdk/issues/1331
|
2018-08-01 19:15:31,421 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to initializing
2018-08-01 19:15:31,424 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to downloading_metadata
2018-08-01 19:15:32,237 INFO lbrynet.core.client.BlobRequester:417: Offered rate 0.000000/mb accepted by 47.17.162.103
2018-08-01 19:15:32,239 INFO lbrynet.core.client.BlobRequester:490: Requesting blob e6cff236d7c84a2a848de826f223a9ca9d7b80186f8b5c1b583ea9373a8e9781f64ac4ca1b1d6cb4db81af74c28ee57b from 47.17.162.103:3335
2018-08-01 19:15:32,247 INFO lbrynet.core.client.BlobRequester:566: Blob e6cff236d7c84a2a has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:32,447 INFO lbrynet.daemon.Downloader:186: Downloading lbry://getbitcoincashoutofelectrumin60seconds (e6cff2) --> D:\LBRY Downloads Main PC\Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:15:32,447 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to running
2018-08-01 19:15:33,174 INFO lbrynet.core.client.BlobRequester:417: Offered rate 0.000000/mb accepted by 47.17.162.103
2018-08-01 19:15:33,174 INFO lbrynet.core.client.BlobRequester:490: Requesting blob 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb from 47.17.162.103:3335
2018-08-01 19:15:33,507 INFO lbrynet.core.client.BlobRequester:566: Blob 34f342bbc6a55124 has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:33,801 INFO lbrynet.core.client.BlobRequester:490: Requesting blob c4270404dbb1040579db29e03dd63402fda49c34a3fbd3b4a95814b3692a685fd866ae83097871b8b10077362aec530a from 47.17.162.103:3335
2018-08-01 19:15:34,086 INFO lbrynet.core.client.BlobRequester:566: Blob c4270404dbb10405 has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:34,089 INFO lbrynet.core.client.BlobRequester:490: Requesting blob 2f41b535acff0046f5f5b12c879bb1b35fc71ffba00291602e6c5ebaf88d2553020eaf6e77f21ab9c3097f83186050da from 47.17.162.103:3335
2018-08-01 19:15:34,190 INFO lbrynet.core.client.BlobRequester:566: Blob 2f41b535acff0046 has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:34,493 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to stopped
2018-08-01 19:15:34,493 INFO lbrynet.daemon.Downloader:149: Finished downloading lbry://getbitcoincashoutofelectrumin60seconds (e6cff2) --> D:\LBRY Downloads Main PC\Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:15:39,910 INFO lbrynet.daemon.Daemon:1805: Deleted file: Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:15:48,651 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to initializing
2018-08-01 19:15:48,651 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to downloading_metadata
2018-08-01 19:15:49,378 INFO lbrynet.core.client.BlobRequester:417: Offered rate 0.000000/mb accepted by 47.17.162.103
2018-08-01 19:15:49,378 INFO lbrynet.core.client.BlobRequester:490: Requesting blob e6cff236d7c84a2a848de826f223a9ca9d7b80186f8b5c1b583ea9373a8e9781f64ac4ca1b1d6cb4db81af74c28ee57b from 47.17.162.103:3335
2018-08-01 19:15:49,384 INFO lbrynet.core.client.BlobRequester:566: Blob e6cff236d7c84a2a has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:49,687 INFO lbrynet.daemon.Downloader:186: Downloading lbry://getbitcoincashoutofelectrumin60seconds (e6cff2) --> D:\LBRY Downloads Main PC\Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:15:49,687 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to running
2018-08-01 19:15:50,513 INFO lbrynet.core.client.BlobRequester:417: Offered rate 0.000000/mb accepted by 47.17.162.103
2018-08-01 19:15:50,513 INFO lbrynet.core.client.BlobRequester:490: Requesting blob 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb from 47.17.162.103:3335
2018-08-01 19:15:50,831 INFO lbrynet.core.client.BlobRequester:566: Blob 34f342bbc6a55124 has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:50,834 INFO lbrynet.core.client.BlobRequester:490: Requesting blob c4270404dbb1040579db29e03dd63402fda49c34a3fbd3b4a95814b3692a685fd866ae83097871b8b10077362aec530a from 47.17.162.103:3335
2018-08-01 19:15:51,167 INFO lbrynet.core.client.BlobRequester:566: Blob c4270404dbb10405 has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:51,168 INFO lbrynet.core.client.BlobRequester:490: Requesting blob 2f41b535acff0046f5f5b12c879bb1b35fc71ffba00291602e6c5ebaf88d2553020eaf6e77f21ab9c3097f83186050da from 47.17.162.103:3335
2018-08-01 19:15:51,279 INFO lbrynet.core.client.BlobRequester:566: Blob 2f41b535acff0046 has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:15:51,767 INFO lbrynet.dht.hashannouncer:54: Announcing 2 blobs
2018-08-01 19:15:51,779 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to stopped
2018-08-01 19:15:51,779 INFO lbrynet.daemon.Downloader:149: Finished downloading lbry://getbitcoincashoutofelectrumin60seconds (e6cff2) --> D:\LBRY Downloads Main PC\Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:15:57,322 INFO lbrynet.dht.hashannouncer:72: Took 5.55400013924 seconds to announce 2 of 2 attempted hashes (0.000000 hashes per second)
2018-08-01 19:15:59,857 INFO lbrynet.daemon.Daemon:1805: Deleted file: Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:16:18,078 INFO lbrynet.core.Wallet:411: Skipping dust
2018-08-01 19:16:18,749 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to initializing
2018-08-01 19:16:18,750 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to downloading_metadata
2018-08-01 19:16:19,477 INFO lbrynet.core.client.BlobRequester:417: Offered rate 0.000000/mb accepted by 47.17.162.103
2018-08-01 19:16:19,479 INFO lbrynet.core.client.BlobRequester:490: Requesting blob e6cff236d7c84a2a848de826f223a9ca9d7b80186f8b5c1b583ea9373a8e9781f64ac4ca1b1d6cb4db81af74c28ee57b from 47.17.162.103:3335
2018-08-01 19:16:19,486 INFO lbrynet.core.client.BlobRequester:566: Blob e6cff236d7c84a2a has been successfully downloaded from 47.17.162.103:3335
2018-08-01 19:16:19,776 INFO lbrynet.daemon.Downloader:186: Downloading lbry://getbitcoincashoutofelectrumin60seconds (e6cff2) --> D:\LBRY Downloads Main PC\Get Bitcoin Cash out of Electrum in 60 Seconds.mp4
2018-08-01 19:16:19,776 INFO lbrynet.daemon.Downloader:93: Download lbry://getbitcoincashoutofelectrumin60seconds status changed to running
2018-08-01 19:16:20,522 INFO lbrynet.core.client.BlobRequester:417: Offered rate 0.000000/mb accepted by 47.17.162.103
2018-08-01 19:16:20,523 INFO lbrynet.core.client.BlobRequester:490: Requesting blob 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb from 47.17.162.103:3335
2018-08-01 19:16:20,838 WARNING lbrynet.core.client.BlobRequester:293: An error occurred while downloading 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb from 47.17.162.103:3335. Error: Traceback (most recent call last):
Failure: lbrynet.core.Error.InvalidDataError: blob hash is bb6a484ebae897012c6bae1a31a7b0ffa96521574b0a620d569d6120730185458537e08ba776160dfc5b7dc39b6936ca vs expected 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb
2018-08-01 19:16:20,838 WARNING lbrynet.core.client.ClientProtocol:188: The connection to 47.17.162.103:3335 is closing due to: [Failure instance: Traceback (failure with no frames): <class 'lbrynet.core.Error.InvalidDataError'>: blob hash is bb6a484ebae897012c6bae1a31a7b0ffa96521574b0a620d569d6120730185458537e08ba776160dfc5b7dc39b6936ca vs expected 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb
]
2018-08-01 19:16:20,838 WARNING lbrynet.core.client.ClientProtocol:188: The connection to 47.17.162.103:3335 is closing due to: [Failure instance: Traceback (failure with no frames): <class 'lbrynet.core.Error.InvalidDataError'>: blob hash is bb6a484ebae897012c6bae1a31a7b0ffa96521574b0a620d569d6120730185458537e08ba776160dfc5b7dc39b6936ca vs expected 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb
]
2018-08-01 19:16:20,839 INFO lbrynet.core.client.ClientProtocol:229: blob hash is bb6a484ebae897012c6bae1a31a7b0ffa96521574b0a620d569d6120730185458537e08ba776160dfc5b7dc39b6936ca vs expected 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb
2018-08-01 19:16:20,839 INFO lbrynet.core.client.ClientProtocol:231: The connection to 47.17.162.103:3335 is closing due to an error: Traceback (most recent call last):
Failure: lbrynet.core.Error.InvalidDataError: blob hash is bb6a484ebae897012c6bae1a31a7b0ffa96521574b0a620d569d6120730185458537e08ba776160dfc5b7dc39b6936ca vs expected 34f342bbc6a55124bac0790a8bb49bae9f19d60ffda10db4f9b6529bb502551336dee6afc0e091321322a34772628ccb
2018-08-01 19:16:20,839 WARNING lbrynet.core.client.ClientProtocol:238: Not asking for another request from 47.17.162.103:3335
2018-08-01 19:16:21,746 INFO lbrynet.dht.hashannouncer:54: Announcing 1 blobs
2018-08-01 19:16:26,589 INFO lbrynet.dht.hashannouncer:72: Took 4.84200000763 seconds to announce 1 of 1 attempted hashes (0.000000 hashes per second)
2018-08-01 19:17:18,285 INFO lbrynet.core.Wallet:411: Skipping dust
2018-08-01 19:17:28,733 INFO lbrynet.daemon.Daemon:1692: Already waiting on lbry://getbitcoincashoutofelectrumin60seconds to start downloading
2018-08-01 19:18:05,559 INFO lbrynet.daemon.DaemonControl:84: Starting lbrynet-daemon from command line
2018-08-01 19:18:17,280 INFO lbrynet.database.storage:174: connecting to database: C:\Users\thoma\AppData\Roaming\lbrynet\lbrynet.sqlite
2018-08-01 19:18:17,282 INFO lbrynet.daemon.DaemonServer:65: Using non-authenticated API
2018-08-01 19:18:17,283 INFO lbrynet.daemon.DaemonServer:42: lbrynet API listening on TCP localhost:5279
2018-08-01 19:18:17,283 INFO lbrynet.daemon.Daemon:242: Starting lbrynet-daemon
2018-08-01 19:18:17,305 INFO lbrynet.daemon.ExchangeRateManager:217: Starting exchange rate manager
2018-08-01 19:18:18,451 INFO lbrynet.daemon.Daemon:284: Platform: {"lbrynet_version": "0.20.4", "platform": "Windows-10-10.0.15063", "os_system": "Windows", "python_version": "2.7.15", "os_release": "10", "lbryum_version": "3.2.3", "ip": "47.17.162.103", "lbryschema_version": "0.0.16", "processor": "Intel64 Family 6 Model 60 Stepping 3, GenuineIntel", "build": "release"}
2018-08-01 19:18:18,453 INFO lbrynet.daemon.Daemon:487: Loading databases
2018-08-01 19:18:18,459 INFO lbrynet.daemon.Daemon:1141: Get version info: {"lbrynet_version": "0.20.4", "platform": "Windows-10-10.0.15063", "os_system": "Windows", "python_version": "2.7.15", "os_release": "10", "lbryum_version": "3.2.3", "ip": "47.17.162.103", "lbryschema_version": "0.0.16", "processor": "Intel64 Family 6 Model 60 Stepping 3, GenuineIntel", "build": "release"}
2018-08-01 19:18:18,469 INFO lbrynet.daemon.Daemon:542: Using lbryum wallet
2018-08-01 19:18:18,493 INFO lbrynet.core.Session:185: Set UPnP redirect 47.17.162.103:3222 (TCP) to 192.168.0.6:3222
2018-08-01 19:18:18,509 INFO lbrynet.core.Session:185: Set UPnP redirect 47.17.162.103:44445 (UDP) to 192.168.0.6:44445
2018-08-01 19:18:19,532 INFO lbrynet.dht.protocol:202: DHT listening on UDP 47.17.162.103:44445
2018-08-01 19:18:19,532 INFO lbrynet.core.RateLimiter:72: Starting rate limiter.
2018-08-01 19:18:19,619 INFO lbrynet.dht.node:249: Attempting to join the DHT network, 0 contacts known so far
2018-08-01 19:18:20,349 INFO lbrynet.core.Wallet:157: lbryumx2.lbry.io:50001 height: 412966, local height: 412966
2018-08-01 19:18:20,349 INFO lbrynet.core.Wallet:196: Starting wallet.
2018-08-01 19:18:20,448 INFO lbrynet.core.Wallet:984: Loading the wallet
2018-08-01 19:18:22,502 WARNING lbrynet.core.Wallet:1066: Your wallet is excessively large (3054 addresses), please follow instructions here: https://github.com/lbryio/lbry/issues/437 to reduce your wallet size
2018-08-01 19:18:25,852 INFO lbrynet.core.Wallet:1081: Local Height: 412966, remote height: 412966, behind: 0
2018-08-01 19:18:25,854 INFO lbrynet.core.Wallet:1089: Wallet Loaded
2018-08-01 19:18:25,854 INFO lbrynet.core.Wallet:1007: Subscribing to addresses
2018-08-01 19:18:26,161 INFO lbrynet.core.Wallet:1009: Synchronized wallet
2018-08-01 19:18:26,161 INFO lbrynet.core.Wallet:1011: Set up lbryum command runner
2018-08-01 19:18:26,170 INFO lbrynet.core.Wallet:957: Wallet is not encrypted
2018-08-01 19:18:26,180 INFO lbrynet.daemon.Daemon:526: Starting the file manager
2018-08-01 19:18:26,194 ERROR lbrynet.daemon.DaemonControl:118: Failed to start lbrynet-daemon
Traceback (most recent call last):
File "site-packages\lbrynet\daemon\DaemonControl.py", line 115, in start_server_and_listen
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\daemon\DaemonServer.py", line 52, in start
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\daemon\Daemon.py", line 257, in setup
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\daemon\Daemon.py", line 529, in _setup_lbry_file_manager
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\file_manager\EncryptedFileManager.py", line 46, in setup
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\file_manager\EncryptedFileManager.py", line 119, in _start_lbry_files
File "site-packages\twisted\internet\defer.py", line 1297, in _inlineCallbacks
File "site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
File "site-packages\lbrynet\database\storage.py", line 752, in get_claims_from_stream_hashes
File "site-packages\twisted\python\threadpool.py", line 246, in inContext
File "site-packages\twisted\python\threadpool.py", line 262, in <lambda>
File "site-packages\twisted\python\context.py", line 118, in callWithContext
File "site-packages\twisted\python\context.py", line 81, in callWithContext
File "site-packages\twisted\enterprise\adbapi.py", line 477, in _runInteraction
File "site-packages\twisted\enterprise\adbapi.py", line 467, in _runInteraction
File "site-packages\lbrynet\database\storage.py", line 731, in _batch_get_claim
OperationalError: too many SQL variables
2018-08-01 19:18:26,214 INFO lbrynet.daemon.DaemonServer:56: Stopping the reactor
2018-08-01 19:18:26,214 INFO lbrynet.daemon.Daemon:420: Closing lbrynet session
2018-08-01 19:18:26,214 INFO lbrynet.daemon.Daemon:421: Status at time of shutdown: loading_file_manager
2018-08-01 19:18:26,214 INFO lbrynet.file_manager.EncryptedFileManager:148: Stopping 0 lbry files
2018-08-01 19:18:26,214 INFO lbrynet.file_manager.EncryptedFileManager:252: Stopped encrypted file manager
2018-08-01 19:18:26,214 INFO lbrynet.core.Session:141: Stopping session.
2018-08-01 19:18:26,214 INFO lbrynet.core.RateLimiter:83: Stopping rate limiter.
2018-08-01 19:18:26,214 INFO lbrynet.core.Wallet:208: Stopping wallet.
2018-08-01 19:18:26,280 INFO lbrynet.core.Wallet:1036: Stopped wallet
2018-08-01 19:18:26,280 INFO lbrynet.core.Wallet:1039: Stopped connection to lbryum server
2018-08-01 19:18:26,280 INFO lbrynet.core.Session:263: Unsetting upnp for session
2018-08-01 19:18:26,282 INFO lbrynet.dht.protocol:493: Stopping DHT
2018-08-01 19:18:26,282 INFO lbrynet.dht.protocol:496: DHT stopped
2018-08-01 19:18:26,299 INFO lbrynet.core.Session:277: Removed UPnP redirect for TCP 3222.
2018-08-01 19:18:26,313 INFO lbrynet.core.Session:277: Removed UPnP redirect for UDP 44445.
2018-08-01 19:18:26,480 WARNING lbrynet.analytics:257: Failed to send track event. Traceback (most recent call last):
Failure: twisted.web._newclient.RequestTransmissionFailed: [<twisted.python.failure.Failure twisted.internet.error.ConnectionLost: Connection to the other side was lost in a non-clean fashion: Connection lost.>]
({'timestamp': '2018-08-01T23:18:26.205000Z', 'userId': 'lbry', 'event': 'Server Startup Error', 'context': {'app': {'build': 'release', 'version': '0.20.4', 'name': 'lbrynet', 'wallet': {'version': '3.2.3', 'name': 'lbryum'}, 'python_version': '2.7.15'}, 'os': {'version': '10', 'name': 'Windows'}, 'library': {'version': '1.0.0', 'name': 'lbrynet-analytics'}}, 'properties': {'message': 'too many SQL variables', 'lbry_id': '3vbBV2oD6JvWcGsfWrhdfwKhqSD2bcqeZbMST4EGwXCS49Xf4R9tUaozmM7ZvAZ13B', 'session_id': '8zDSvzWrff3UfzWWAsEQBgWDzhPZHNAWvsgYpiN5QbapUFtxqTr6o1Lvx3vTimraWt'}})
2018-08-01 19:18:26,482 CRITICAL twisted:154: Unexpected exception from twisted.web.client.FileBodyProducer.stopProducing
Traceback (most recent call last):
File "site-packages\twisted\internet\endpoints.py", line 130, in connectionLost
File "site-packages\twisted\web\_newclient.py", line 929, in dispatcher
File "site-packages\twisted\web\_newclient.py", line 1599, in _connectionLost_TRANSMITTING
File "site-packages\twisted\web\_newclient.py", line 830, in stopWriting
--- <exception caught here> ---
File "site-packages\twisted\web\_newclient.py", line 194, in _callAppFunction
File "site-packages\twisted\web\client.py", line 1041, in stopProducing
File "site-packages\twisted\internet\task.py", line 497, in stop
File "site-packages\twisted\internet\task.py", line 507, in _checkFinish
twisted.internet.task.TaskStopped:
2018-08-01 19:18:26,483 CRITICAL twisted:154: Unhandled error in Deferred:
2018-08-01 19:18:26,483 CRITICAL twisted:154:
RequestTransmissionFailed: [<twisted.python.failure.Failure twisted.internet.error.ConnectionLost: Connection to the other side was lost in a non-clean fashion: Connection lost.>]
2018-08-01 19:18:26,483 CRITICAL twisted:154: Unexpected exception from twisted.web.client.FileBodyProducer.stopProducing
Traceback (most recent call last):
File "site-packages\twisted\internet\endpoints.py", line 130, in connectionLost
File "site-packages\twisted\web\_newclient.py", line 929, in dispatcher
File "site-packages\twisted\web\_newclient.py", line 1599, in _connectionLost_TRANSMITTING
File "site-packages\twisted\web\_newclient.py", line 830, in stopWriting
--- <exception caught here> ---
File "site-packages\twisted\web\_newclient.py", line 194, in _callAppFunction
File "site-packages\twisted\web\client.py", line 1041, in stopProducing
File "site-packages\twisted\internet\task.py", line 497, in stop
File "site-packages\twisted\internet\task.py", line 507, in _checkFinish
twisted.internet.task.TaskStopped:
2018-08-01 19:18:26,484 WARNING lbrynet.analytics:257: Failed to send track event. Traceback (most recent call last):
Failure: twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure twisted.internet.error.ConnectionLost: Connection to the other side was lost in a non-clean fashion: Connection lost.>]
({'timestamp': '2018-08-01T23:18:26.171000Z', 'userId': 'lbry', 'event': 'Heartbeat', 'context': {'app': {'build': 'release', 'version': '0.20.4', 'name': 'lbrynet', 'wallet': {'version': '3.2.3', 'name': 'lbryum'}, 'python_version': '2.7.15'}, 'os': {'version': '10', 'name': 'Windows'}, 'library': {'version': '1.0.0', 'name': 'lbrynet-analytics'}}, 'properties': {'lbry_id': '3vbBV2oD6JvWcGsfWrhdfwKhqSD2bcqeZbMST4EGwXCS49Xf4R9tUaozmM7ZvAZ13B', 'session_id': '8zDSvzWrff3UfzWWAsEQBgWDzhPZHNAWvsgYpiN5QbapUFtxqTr6o1Lvx3vTimraWt'}})
|
OperationalError
|
def _refreshContacts(self):
self._protocol._ping_queue.enqueue_maybe_ping(*self.contacts, delay=0)
|
def _refreshContacts(self):
self._protocol._ping_queue.enqueue_maybe_ping(*self.contacts)
|
https://github.com/lbryio/lbry-sdk/issues/1439
|
2018-09-17 05:43:51,637 INFO lbrynet.reflector.client.client:74: Reflector has all blobs for b41893e22fa11414
2018-09-17 05:43:51,641 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee2b8996e30e9f6b to reflect, 120 are still needed
2018-09-17 05:43:51,675 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee7458aa0b1fb469 to reflect, 1 are still needed
2018-09-17 05:52:25,446 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:53:05,716 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:46,935 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,377 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,613 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,614 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,794 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,796 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,797 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,004 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,006 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,007 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,062 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,071 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,194 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,195 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,196 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,430 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,431 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,486 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,488 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,547 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,024 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,025 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,027 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,262 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,263 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,136 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,138 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:25,263 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:27,106 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:55,473 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
Unhandled error in Deferred:
2018-09-17 06:39:55,637 CRITICAL twisted:154: Unhandled error in Deferred:
Fatal Python error: Cannot recover from stack overflow.
Thread 0x00008c88 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026e4 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00001b8c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002a0c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00005acc (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x0000bd54 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006508 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002720 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006c60 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026ec (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000057c0 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006b68 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00008318 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Current thread 0x000027e4 (most recent call first):
File "c:\Python36\lib\traceback.py", line 352 in extract
File "c:\Python36\lib\traceback.py", line 497 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 100 in print_exception
File "logging\__init__.py", line 533 in formatException
File "logging\__init__.py", line 583 in format
File "logging\__init__.py", line 838 in format
File "logging\handlers.py", line 187 in shouldRollover
File "logging\handlers.py", line 71 in emit
File "logging\__init__.py", line 863 in handle
File "logging\__init__.py", line 1514 in callHandlers
File "logging\__init__.py", line 1452 in handle
File "logging\__init__.py", line 1442 in _log
File "logging\__init__.py", line 1372 in log
File "twisted\logger\_stdlib.py", line 116 in __call__
File "twisted\logger\_legacy.py", line 154 in publishToNewObserver
File "twisted\python\log.py", line 595 in emit
File "twisted\logger\_legacy.py", line 93 in __call__
File "twisted\logger\_observer.py", line 131 in __call__
File "twisted\logger\_logger.py", line 144 in emit
File "twisted\logger\_logger.py", line 190 in failure
File "twisted\internet\defer.py", line 964 in __del__
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
...
|
ValueError
|
def _refreshStoringPeers(self):
self._protocol._ping_queue.enqueue_maybe_ping(
*self._dataStore.getStoringContacts(), delay=0
)
|
def _refreshStoringPeers(self):
self._protocol._ping_queue.enqueue_maybe_ping(*self._dataStore.getStoringContacts())
|
https://github.com/lbryio/lbry-sdk/issues/1439
|
2018-09-17 05:43:51,637 INFO lbrynet.reflector.client.client:74: Reflector has all blobs for b41893e22fa11414
2018-09-17 05:43:51,641 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee2b8996e30e9f6b to reflect, 120 are still needed
2018-09-17 05:43:51,675 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee7458aa0b1fb469 to reflect, 1 are still needed
2018-09-17 05:52:25,446 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:53:05,716 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:46,935 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,377 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,613 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,614 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,794 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,796 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,797 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,004 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,006 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,007 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,062 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,071 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,194 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,195 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,196 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,430 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,431 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,486 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,488 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,547 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,024 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,025 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,027 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,262 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,263 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,136 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,138 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:25,263 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:27,106 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:55,473 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
Unhandled error in Deferred:
2018-09-17 06:39:55,637 CRITICAL twisted:154: Unhandled error in Deferred:
Fatal Python error: Cannot recover from stack overflow.
Thread 0x00008c88 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026e4 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00001b8c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002a0c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00005acc (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x0000bd54 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006508 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002720 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006c60 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026ec (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000057c0 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006b68 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00008318 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Current thread 0x000027e4 (most recent call first):
File "c:\Python36\lib\traceback.py", line 352 in extract
File "c:\Python36\lib\traceback.py", line 497 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 100 in print_exception
File "logging\__init__.py", line 533 in formatException
File "logging\__init__.py", line 583 in format
File "logging\__init__.py", line 838 in format
File "logging\handlers.py", line 187 in shouldRollover
File "logging\handlers.py", line 71 in emit
File "logging\__init__.py", line 863 in handle
File "logging\__init__.py", line 1514 in callHandlers
File "logging\__init__.py", line 1452 in handle
File "logging\__init__.py", line 1442 in _log
File "logging\__init__.py", line 1372 in log
File "twisted\logger\_stdlib.py", line 116 in __call__
File "twisted\logger\_legacy.py", line 154 in publishToNewObserver
File "twisted\python\log.py", line 595 in emit
File "twisted\logger\_legacy.py", line 93 in __call__
File "twisted\logger\_observer.py", line 131 in __call__
File "twisted\logger\_logger.py", line 144 in emit
File "twisted\logger\_logger.py", line 190 in failure
File "twisted\internet\defer.py", line 964 in __del__
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
...
|
ValueError
|
def __init__(self, node):
self._node = node
self._enqueued_contacts = {}
self._pending_contacts = {}
self._process_lc = node.get_looping_call(self._process)
|
def __init__(self, node):
self._node = node
self._enqueued_contacts = {}
self._process_lc = node.get_looping_call(self._process)
|
https://github.com/lbryio/lbry-sdk/issues/1439
|
2018-09-17 05:43:51,637 INFO lbrynet.reflector.client.client:74: Reflector has all blobs for b41893e22fa11414
2018-09-17 05:43:51,641 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee2b8996e30e9f6b to reflect, 120 are still needed
2018-09-17 05:43:51,675 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee7458aa0b1fb469 to reflect, 1 are still needed
2018-09-17 05:52:25,446 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:53:05,716 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:46,935 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,377 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,613 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,614 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,794 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,796 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,797 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,004 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,006 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,007 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,062 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,071 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,194 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,195 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,196 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,430 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,431 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,486 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,488 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,547 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,024 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,025 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,027 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,262 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,263 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,136 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,138 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:25,263 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:27,106 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:55,473 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
Unhandled error in Deferred:
2018-09-17 06:39:55,637 CRITICAL twisted:154: Unhandled error in Deferred:
Fatal Python error: Cannot recover from stack overflow.
Thread 0x00008c88 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026e4 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00001b8c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002a0c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00005acc (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x0000bd54 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006508 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002720 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006c60 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026ec (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000057c0 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006b68 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00008318 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Current thread 0x000027e4 (most recent call first):
File "c:\Python36\lib\traceback.py", line 352 in extract
File "c:\Python36\lib\traceback.py", line 497 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 100 in print_exception
File "logging\__init__.py", line 533 in formatException
File "logging\__init__.py", line 583 in format
File "logging\__init__.py", line 838 in format
File "logging\handlers.py", line 187 in shouldRollover
File "logging\handlers.py", line 71 in emit
File "logging\__init__.py", line 863 in handle
File "logging\__init__.py", line 1514 in callHandlers
File "logging\__init__.py", line 1452 in handle
File "logging\__init__.py", line 1442 in _log
File "logging\__init__.py", line 1372 in log
File "twisted\logger\_stdlib.py", line 116 in __call__
File "twisted\logger\_legacy.py", line 154 in publishToNewObserver
File "twisted\python\log.py", line 595 in emit
File "twisted\logger\_legacy.py", line 93 in __call__
File "twisted\logger\_observer.py", line 131 in __call__
File "twisted\logger\_logger.py", line 144 in emit
File "twisted\logger\_logger.py", line 190 in failure
File "twisted\internet\defer.py", line 964 in __del__
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
...
|
ValueError
|
def enqueue_maybe_ping(self, *contacts, **kwargs):
delay = kwargs.get("delay", constants.checkRefreshInterval)
no_op = (defer.succeed(None), lambda: None)
for contact in contacts:
if delay and contact not in self._enqueued_contacts:
self._pending_contacts.setdefault(
contact, self._node.clock.seconds() + delay
)
else:
self._enqueued_contacts.setdefault(contact, no_op)
|
def enqueue_maybe_ping(self, *contacts, **kwargs):
no_op = (defer.succeed(None), lambda: None)
for contact in contacts:
self._enqueued_contacts.setdefault(contact, no_op)
|
https://github.com/lbryio/lbry-sdk/issues/1439
|
2018-09-17 05:43:51,637 INFO lbrynet.reflector.client.client:74: Reflector has all blobs for b41893e22fa11414
2018-09-17 05:43:51,641 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee2b8996e30e9f6b to reflect, 120 are still needed
2018-09-17 05:43:51,675 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee7458aa0b1fb469 to reflect, 1 are still needed
2018-09-17 05:52:25,446 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:53:05,716 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:46,935 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,377 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,613 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,614 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,794 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,796 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,797 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,004 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,006 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,007 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,062 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,071 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,194 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,195 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,196 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,430 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,431 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,486 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,488 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,547 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,024 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,025 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,027 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,262 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,263 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,136 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,138 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:25,263 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:27,106 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:55,473 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
Unhandled error in Deferred:
2018-09-17 06:39:55,637 CRITICAL twisted:154: Unhandled error in Deferred:
Fatal Python error: Cannot recover from stack overflow.
Thread 0x00008c88 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026e4 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00001b8c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002a0c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00005acc (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x0000bd54 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006508 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002720 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006c60 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026ec (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000057c0 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006b68 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00008318 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Current thread 0x000027e4 (most recent call first):
File "c:\Python36\lib\traceback.py", line 352 in extract
File "c:\Python36\lib\traceback.py", line 497 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 100 in print_exception
File "logging\__init__.py", line 533 in formatException
File "logging\__init__.py", line 583 in format
File "logging\__init__.py", line 838 in format
File "logging\handlers.py", line 187 in shouldRollover
File "logging\handlers.py", line 71 in emit
File "logging\__init__.py", line 863 in handle
File "logging\__init__.py", line 1514 in callHandlers
File "logging\__init__.py", line 1452 in handle
File "logging\__init__.py", line 1442 in _log
File "logging\__init__.py", line 1372 in log
File "twisted\logger\_stdlib.py", line 116 in __call__
File "twisted\logger\_legacy.py", line 154 in publishToNewObserver
File "twisted\python\log.py", line 595 in emit
File "twisted\logger\_legacy.py", line 93 in __call__
File "twisted\logger\_observer.py", line 131 in __call__
File "twisted\logger\_logger.py", line 144 in emit
File "twisted\logger\_logger.py", line 190 in failure
File "twisted\internet\defer.py", line 964 in __del__
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
...
|
ValueError
|
def _process(self):
# move contacts that are scheduled to join the queue
if self._pending_contacts:
now = self._node.clock.seconds()
for contact in [
contact
for contact, schedule in self._pending_contacts.items()
if schedule <= now
]:
del self._pending_contacts[contact]
self._enqueued_contacts.setdefault(
contact, (defer.succeed(None), lambda: None)
)
# spread pings across 60 seconds to avoid flood and/or false negatives
step = 60.0 / float(len(self._enqueued_contacts)) if self._enqueued_contacts else 0
for index, (contact, (call, _)) in enumerate(self._enqueued_contacts.items()):
if call.called and not contact.contact_is_good:
self._enqueued_contacts[contact] = self._node.reactor_callLater(
index * step, self._ping, contact
)
|
def _process(self):
if not self._enqueued_contacts:
return
# spread pings across 60 seconds to avoid flood and/or false negatives
step = 60.0 / float(len(self._enqueued_contacts))
for index, (contact, (call, _)) in enumerate(self._enqueued_contacts.items()):
if call.called and not contact.contact_is_good:
self._enqueued_contacts[contact] = self._node.reactor_callLater(
index * step, self._ping, contact
)
|
https://github.com/lbryio/lbry-sdk/issues/1439
|
2018-09-17 05:43:51,637 INFO lbrynet.reflector.client.client:74: Reflector has all blobs for b41893e22fa11414
2018-09-17 05:43:51,641 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee2b8996e30e9f6b to reflect, 120 are still needed
2018-09-17 05:43:51,675 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee7458aa0b1fb469 to reflect, 1 are still needed
2018-09-17 05:52:25,446 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:53:05,716 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:46,935 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,377 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,613 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,614 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,794 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,796 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,797 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,004 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,006 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,007 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,062 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,071 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,194 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,195 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,196 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,430 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,431 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,486 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,488 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,547 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,024 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,025 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,027 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,262 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,263 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,136 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,138 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:25,263 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:27,106 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:55,473 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
Unhandled error in Deferred:
2018-09-17 06:39:55,637 CRITICAL twisted:154: Unhandled error in Deferred:
Fatal Python error: Cannot recover from stack overflow.
Thread 0x00008c88 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026e4 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00001b8c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002a0c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00005acc (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x0000bd54 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006508 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002720 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006c60 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026ec (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000057c0 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006b68 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00008318 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Current thread 0x000027e4 (most recent call first):
File "c:\Python36\lib\traceback.py", line 352 in extract
File "c:\Python36\lib\traceback.py", line 497 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 100 in print_exception
File "logging\__init__.py", line 533 in formatException
File "logging\__init__.py", line 583 in format
File "logging\__init__.py", line 838 in format
File "logging\handlers.py", line 187 in shouldRollover
File "logging\handlers.py", line 71 in emit
File "logging\__init__.py", line 863 in handle
File "logging\__init__.py", line 1514 in callHandlers
File "logging\__init__.py", line 1452 in handle
File "logging\__init__.py", line 1442 in _log
File "logging\__init__.py", line 1372 in log
File "twisted\logger\_stdlib.py", line 116 in __call__
File "twisted\logger\_legacy.py", line 154 in publishToNewObserver
File "twisted\python\log.py", line 595 in emit
File "twisted\logger\_legacy.py", line 93 in __call__
File "twisted\logger\_observer.py", line 131 in __call__
File "twisted\logger\_logger.py", line 144 in emit
File "twisted\logger\_logger.py", line 190 in failure
File "twisted\internet\defer.py", line 964 in __del__
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
...
|
ValueError
|
def stop(self):
map(
None,
(
cancel()
for _, (call, cancel) in self._enqueued_contacts.items()
if not call.called
),
)
return self._node.safe_stop_looping_call(self._process_lc)
|
def stop(self):
return self._node.safe_stop_looping_call(self._process_lc)
|
https://github.com/lbryio/lbry-sdk/issues/1439
|
2018-09-17 05:43:51,637 INFO lbrynet.reflector.client.client:74: Reflector has all blobs for b41893e22fa11414
2018-09-17 05:43:51,641 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee2b8996e30e9f6b to reflect, 120 are still needed
2018-09-17 05:43:51,675 INFO lbrynet.reflector.client.client:77: No more completed blobs for ee7458aa0b1fb469 to reflect, 1 are still needed
2018-09-17 05:52:25,446 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:53:05,716 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 05:54:46,935 ERROR lbrynet.dht.protocol:444: error handling request for 122.162.66.158:4444 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,377 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,613 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:21,614 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,794 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,796 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:22,797 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,004 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,006 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,007 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,062 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:24,071 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,194 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,195 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,196 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,430 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,431 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,486 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,488 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,547 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:25,549 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,024 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:4445 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,025 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,027 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,262 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:26,263 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,136 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53833 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:27:27,138 ERROR lbrynet.dht.protocol:444: error handling request for 85.17.24.157:53835 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:25,263 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:27,106 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
2018-09-17 06:37:55,473 ERROR lbrynet.dht.protocol:444: error handling request for 188.24.157.138:1029 b'store'
Traceback (most recent call last):
File "lbrynet\dht\protocol.py", line 440, in _handleRPC
File "lbrynet\dht\node.py", line 509, in store
ValueError: Invalid token
Unhandled error in Deferred:
2018-09-17 06:39:55,637 CRITICAL twisted:154: Unhandled error in Deferred:
Fatal Python error: Cannot recover from stack overflow.
Thread 0x00008c88 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026e4 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00001b8c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002a0c (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00005acc (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x0000bd54 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006508 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00002720 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006c60 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000026ec (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x000057c0 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00006b68 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Thread 0x00008318 (most recent call first):
File "threading.py", line 295 in wait
File "queue.py", line 164 in get
File "twisted\_threads\_threadworker.py", line 45 in work
File "threading.py", line 864 in run
File "threading.py", line 916 in _bootstrap_inner
File "threading.py", line 884 in _bootstrap
Current thread 0x000027e4 (most recent call first):
File "c:\Python36\lib\traceback.py", line 352 in extract
File "c:\Python36\lib\traceback.py", line 497 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 486 in __init__
File "c:\Python36\lib\traceback.py", line 100 in print_exception
File "logging\__init__.py", line 533 in formatException
File "logging\__init__.py", line 583 in format
File "logging\__init__.py", line 838 in format
File "logging\handlers.py", line 187 in shouldRollover
File "logging\handlers.py", line 71 in emit
File "logging\__init__.py", line 863 in handle
File "logging\__init__.py", line 1514 in callHandlers
File "logging\__init__.py", line 1452 in handle
File "logging\__init__.py", line 1442 in _log
File "logging\__init__.py", line 1372 in log
File "twisted\logger\_stdlib.py", line 116 in __call__
File "twisted\logger\_legacy.py", line 154 in publishToNewObserver
File "twisted\python\log.py", line 595 in emit
File "twisted\logger\_legacy.py", line 93 in __call__
File "twisted\logger\_observer.py", line 131 in __call__
File "twisted\logger\_logger.py", line 144 in emit
File "twisted\logger\_logger.py", line 190 in failure
File "twisted\internet\defer.py", line 964 in __del__
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 311 in addCallbacks
File "twisted\internet\defer.py", line 345 in addBoth
File "twisted\internet\defer.py", line 1653 in execute
File "twisted\internet\defer.py", line 654 in _runCallbacks
File "twisted\internet\defer.py", line 568 in _startRunCallbacks
File "twisted\internet\defer.py", line 460 in callback
File "twisted\internet\defer.py", line 1797 in release
File "twisted\internet\defer.py", line 1625 in _releaseAndReturn
File "twisted\internet\defer.py", line 654 in _runCallbacks
...
|
ValueError
|
def _request_failed(self, reason, request_type):
    """Handle a failed request.

    Expected cancellation/shutdown failures are swallowed silently; anything
    else is logged, lowers both the local and the peer score, and is returned
    so the failure keeps propagating down the errback chain.
    """
    # Failure types that are a normal part of cancellation/shutdown.
    benign = (
        DownloadCanceledError,
        RequestCanceledError,
        ConnectionAborted,
        ConnectionClosedBeforeResponseError,
        ValueError,
    )
    if reason.check(*benign):
        return
    if reason.check(NoResponseError):
        # The peer never answered; remember it as incompatible for this requestor.
        self.requestor._incompatible_peers.append(self.peer)
    log.warning(
        "A request of type '%s' failed. Reason: %s, Error type: %s",
        request_type,
        reason.getErrorMessage(),
        reason.type,
    )
    self.update_local_score(-10.0)
    # Protocol-level misbehavior is penalized harder than generic failures.
    penalty = -10.0 if isinstance(reason, (InvalidResponseError, NoResponseError)) else -2.0
    self.peer.update_score(penalty)
    return reason
|
def _request_failed(self, reason, request_type):
    """Handle a failed request.

    Expected cancellation/shutdown failures are swallowed; anything else is
    logged, lowers both the local and the peer score, and is returned so the
    failure keeps propagating down the errback chain.
    """
    if reason.check(
        DownloadCanceledError,
        RequestCanceledError,
        ConnectionAborted,
        ConnectionClosedBeforeResponseError,
    ):
        return
    if reason.check(NoResponseError):
        # The peer never answered; remember it as incompatible for this requestor.
        self.requestor._incompatible_peers.append(self.peer)
    log.warning(
        "A request of type '%s' failed. Reason: %s, Error type: %s",
        request_type,
        reason.getErrorMessage(),
        reason.type,
    )
    self.update_local_score(-10.0)
    # Protocol-level misbehavior is penalized harder than generic failures.
    if isinstance(reason, (InvalidResponseError, NoResponseError)):
        self.peer.update_score(-10.0)
    else:
        self.peer.update_score(-2.0)
    # NOTE: a trailing `if reason.check(ConnectionClosedBeforeResponseError): return`
    # was removed -- it was unreachable, because that failure type already
    # returns from the check at the top of this method.
    return reason
|
https://github.com/lbryio/lbry-sdk/issues/950
|
2017-10-12 13:52:54,172 INFO lbrynet.core.client.ClientProtocol:185: Closing the connection to x.x.x.x:3333 because the download of blob a5d1ebd863d2b5f7 was canceled
2017-10-12 13:52:54,174 ERROR lbrynet.core.client.ClientProtocol:130: An unexpected error occurred creating or sending a request to x.x.x.x:3333. Error message: Traceback (most recent call last):
File "site-packages/twisted/internet/defer.py", line 651, in _runCallbacks
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 235, in get_next_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 147, in _ask_for_request
File "site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
--- <exception caught here> ---
File "site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 111, in get_next_request
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 137, in _send_primary_requests
File "site-packages/lbrynet/core/client/BlobRequester.py", line 66, in send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 113, in _send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 440, in make_request_and_handle_response
File "site-packages/lbrynet/core/client/BlobRequester.py", line 493, in _handle_download_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 540, in create_add_blob_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 108, in add_blob_request
exceptions.ValueError: There is already a blob download request active
|
exceptions.ValueError
|
def _handle_download_error(err, peer, blob_to_download):
    """Log unexpected download failures and decide whether the connection
    should survive the error.

    Returns True to swallow a price disagreement; otherwise returns the
    failure so it keeps propagating.
    """
    expected = err.check(
        DownloadCanceledError,
        PriceDisagreementError,
        RequestCanceledError,
        ConnectionClosedBeforeResponseError,
    )
    if not expected:
        log.warning(
            "An error occurred while downloading %s from %s. Error: %s",
            blob_to_download.blob_hash,
            str(peer),
            err.getTraceback(),
        )
    if err.check(PriceDisagreementError):
        # Don't kill the whole connection just because a price couldn't be agreed upon.
        # Other information might be desired by other request creators at a better rate.
        return True
    return err
|
def _handle_download_error(err, peer, blob_to_download):
    """Log unexpected download failures; keep the connection alive only for
    price disagreements (returns True), otherwise return the failure so it
    keeps propagating."""
    if err.check(PriceDisagreementError):
        # Don't kill the whole connection just because a price couldn't be agreed upon.
        # Other information might be desired by other request creators at a better rate.
        return True
    # Price disagreements were handled above; cancellations stay silent.
    if not err.check(
        DownloadCanceledError, PriceDisagreementError, RequestCanceledError
    ):
        log.warning(
            "An error occurred while downloading %s from %s. Error: %s",
            blob_to_download.blob_hash,
            str(peer),
            err.getTraceback(),
        )
    return err
|
https://github.com/lbryio/lbry-sdk/issues/950
|
2017-10-12 13:52:54,172 INFO lbrynet.core.client.ClientProtocol:185: Closing the connection to x.x.x.x:3333 because the download of blob a5d1ebd863d2b5f7 was canceled
2017-10-12 13:52:54,174 ERROR lbrynet.core.client.ClientProtocol:130: An unexpected error occurred creating or sending a request to x.x.x.x:3333. Error message: Traceback (most recent call last):
File "site-packages/twisted/internet/defer.py", line 651, in _runCallbacks
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 235, in get_next_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 147, in _ask_for_request
File "site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
--- <exception caught here> ---
File "site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 111, in get_next_request
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 137, in _send_primary_requests
File "site-packages/lbrynet/core/client/BlobRequester.py", line 66, in send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 113, in _send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 440, in make_request_and_handle_response
File "site-packages/lbrynet/core/client/BlobRequester.py", line 493, in _handle_download_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 540, in create_add_blob_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 108, in add_blob_request
exceptions.ValueError: There is already a blob download request active
|
exceptions.ValueError
|
def add_blob_request(self, blob_request):
    """Register a blob download request; only one may be active at a time.

    Returns a deferred. When a download is already in progress, the returned
    deferred fails (no synchronous raise), so callers that only attach
    errbacks still observe the error.
    """
    if self._blob_download_request is not None:
        return defer.fail(ValueError("There is already a blob download request active"))
    queued = self.add_request(blob_request)
    self._blob_download_request = blob_request
    blob_request.finished_deferred.addCallbacks(
        self._downloading_finished, self._handle_response_error
    )
    return queued
|
def add_blob_request(self, blob_request):
    """Register a blob download request; only one may be active at a time.

    Returns:
        A deferred for the queued request. If a blob download is already
        active, a *failed deferred* is returned instead of raising
        synchronously, so callers that only attach errbacks to the returned
        deferred still observe the error instead of an unhandled exception.
    """
    if self._blob_download_request is None:
        d = self.add_request(blob_request)
        self._blob_download_request = blob_request
        blob_request.finished_deferred.addCallbacks(
            self._downloading_finished, self._handle_response_error
        )
        return d
    else:
        # Was `raise ValueError(...)`: the synchronous raise escaped the
        # deferred error path and surfaced as an unhandled exception in
        # callers expecting a deferred.
        return defer.fail(ValueError("There is already a blob download request active"))
|
https://github.com/lbryio/lbry-sdk/issues/950
|
2017-10-12 13:52:54,172 INFO lbrynet.core.client.ClientProtocol:185: Closing the connection to x.x.x.x:3333 because the download of blob a5d1ebd863d2b5f7 was canceled
2017-10-12 13:52:54,174 ERROR lbrynet.core.client.ClientProtocol:130: An unexpected error occurred creating or sending a request to x.x.x.x:3333. Error message: Traceback (most recent call last):
File "site-packages/twisted/internet/defer.py", line 651, in _runCallbacks
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 235, in get_next_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 147, in _ask_for_request
File "site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
--- <exception caught here> ---
File "site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 111, in get_next_request
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 137, in _send_primary_requests
File "site-packages/lbrynet/core/client/BlobRequester.py", line 66, in send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 113, in _send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 440, in make_request_and_handle_response
File "site-packages/lbrynet/core/client/BlobRequester.py", line 493, in _handle_download_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 540, in create_add_blob_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 108, in add_blob_request
exceptions.ValueError: There is already a blob download request active
|
exceptions.ValueError
|
def _handle_request_error(self, err):
    """Log an unexpected failure while creating/sending a request, then drop
    the connection to the peer."""
    fmt = "An unexpected error occurred creating or sending a request to %s. %s: %s"
    log.error(fmt, self.peer, err.type, err.message)
    self.transport.loseConnection()
|
def _handle_request_error(self, err):
    """Log the full traceback of an unexpected failure while creating or
    sending a request, then drop the connection to the peer."""
    fmt = "An unexpected error occurred creating or sending a request to %s. Error message: %s"
    log.error(fmt, self.peer, err.getTraceback())
    self.transport.loseConnection()
|
https://github.com/lbryio/lbry-sdk/issues/950
|
2017-10-12 13:52:54,172 INFO lbrynet.core.client.ClientProtocol:185: Closing the connection to x.x.x.x:3333 because the download of blob a5d1ebd863d2b5f7 was canceled
2017-10-12 13:52:54,174 ERROR lbrynet.core.client.ClientProtocol:130: An unexpected error occurred creating or sending a request to x.x.x.x:3333. Error message: Traceback (most recent call last):
File "site-packages/twisted/internet/defer.py", line 651, in _runCallbacks
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 235, in get_next_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 147, in _ask_for_request
File "site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
--- <exception caught here> ---
File "site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 111, in get_next_request
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 137, in _send_primary_requests
File "site-packages/lbrynet/core/client/BlobRequester.py", line 66, in send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 113, in _send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 440, in make_request_and_handle_response
File "site-packages/lbrynet/core/client/BlobRequester.py", line 493, in _handle_download_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 540, in create_add_blob_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 108, in add_blob_request
exceptions.ValueError: There is already a blob download request active
|
exceptions.ValueError
|
def _handle_response_error(self, err):
    """Terminal errback for a blob response: log, reset download state, and
    close the connection.

    Returns None for expected cancellations (consuming the failure) and the
    failure itself otherwise (so it keeps propagating to callers).
    """
    # If an error gets to this point, log it and kill the connection.
    if err.check(
        DownloadCanceledError,
        RequestCanceledError,
        error.ConnectionAborted,
        ConnectionClosedBeforeResponseError,
    ):
        # TODO: (wish-list) it seems silly to close the connection over this, and it shouldn't
        # TODO: always be this way. it's done this way now because the client has no other way
        # TODO: of telling the server it wants the download to stop. It would be great if the
        # TODO: protocol had such a mechanism.
        log.info(
            "Closing the connection to %s because the download of blob %s was canceled",
            self.peer,
            self._blob_download_request.blob,
        )
        result = None
    elif err.check(MisbehavingPeerError):
        # Known peer misbehavior: warn, but still propagate the failure.
        log.warning("The connection to %s is closing due to: %s", self.peer, err)
        result = err
    else:
        log.error(
            "The connection to %s is closing due to an unexpected error: %s",
            self.peer,
            err,
        )
        result = err
    # Clear the active request so a later add_blob_request() is accepted.
    self._blob_download_request = None
    self._downloading_blob = False
    self.transport.loseConnection()
    return result
|
def _handle_response_error(self, err):
    """Terminal errback for a blob response: log, reset download state, and
    close the connection.

    Returns None for expected cancellations (consuming the failure) and the
    failure itself otherwise (so it keeps propagating to callers).
    """
    # If an error gets to this point, log it and kill the connection.
    if err.check(
        DownloadCanceledError,
        RequestCanceledError,
        error.ConnectionAborted,
        ConnectionClosedBeforeResponseError,
    ):
        # TODO: (wish-list) it seems silly to close the connection over this, and it shouldn't
        # TODO: always be this way. it's done this way now because the client has no other way
        # TODO: of telling the server it wants the download to stop. It would be great if the
        # TODO: protocol had such a mechanism.
        log.info(
            "Closing the connection to %s because the download of blob %s was canceled",
            self.peer,
            self._blob_download_request.blob,
        )
        result = None
    elif err.check(MisbehavingPeerError):
        # Fix: the old condition was inverted (`elif not err.check(...)`),
        # which logged *unexpected* errors as mere warnings and routed known
        # MisbehavingPeerError failures into the "unexpected error" branch.
        log.warning("The connection to %s is closing due to: %s", self.peer, err)
        result = err
    else:
        log.error(
            "The connection to %s is closing due to an unexpected error: %s",
            self.peer,
            err,
        )
        result = err
    # Fix: reset the active request so a later add_blob_request() does not
    # fail with "There is already a blob download request active".
    self._blob_download_request = None
    self.transport.loseConnection()
    return result
|
https://github.com/lbryio/lbry-sdk/issues/950
|
2017-10-12 13:52:54,172 INFO lbrynet.core.client.ClientProtocol:185: Closing the connection to x.x.x.x:3333 because the download of blob a5d1ebd863d2b5f7 was canceled
2017-10-12 13:52:54,174 ERROR lbrynet.core.client.ClientProtocol:130: An unexpected error occurred creating or sending a request to x.x.x.x:3333. Error message: Traceback (most recent call last):
File "site-packages/twisted/internet/defer.py", line 651, in _runCallbacks
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 235, in get_next_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 147, in _ask_for_request
File "site-packages/twisted/internet/defer.py", line 1445, in unwindGenerator
--- <exception caught here> ---
File "site-packages/twisted/internet/defer.py", line 1299, in _inlineCallbacks
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 111, in get_next_request
File "site-packages/lbrynet/core/client/ConnectionManager.py", line 137, in _send_primary_requests
File "site-packages/lbrynet/core/client/BlobRequester.py", line 66, in send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 113, in _send_next_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 440, in make_request_and_handle_response
File "site-packages/lbrynet/core/client/BlobRequester.py", line 493, in _handle_download_request
File "site-packages/lbrynet/core/client/BlobRequester.py", line 540, in create_add_blob_request
File "site-packages/lbrynet/core/client/ClientProtocol.py", line 108, in add_blob_request
exceptions.ValueError: There is already a blob download request active
|
exceptions.ValueError
|
def jsonrpc_blob_announce(
    self, announce_all=None, blob_hash=None, stream_hash=None, sd_hash=None
):
    """
    Announce blobs to the DHT
    Usage:
        blob_announce [-a] [<blob_hash> | --blob_hash=<blob_hash>]
                      [<stream_hash> | --stream_hash=<stream_hash>]
                      [<sd_hash> | --sd_hash=<sd_hash>]
    Options:
        -a                                          : announce all the blobs possessed by user
        <blob_hash>, --blob_hash=<blob_hash>        : announce a blob, specified by blob_hash
        <stream_hash>, --stream_hash=<stream_hash>  : announce all blobs associated with
                                                        stream_hash
        <sd_hash>, --sd_hash=<sd_hash>              : announce all blobs associated with
                                                        sd_hash and the sd_hash itself
    Returns:
        (bool) true if successful
    """
    if announce_all:
        yield self.session.blob_manager.immediate_announce_all_blobs()
    else:
        blob_hashes = []
        if blob_hash:
            # Fix: append the supplied hash. The old code was
            # `blob_hashes = blob_hashes.append(blob_hashes)`, which both
            # appended the wrong object and rebound blob_hashes to None
            # (list.append returns None).
            blob_hashes.append(blob_hash)
        elif stream_hash:
            # stream_hash supplied directly; its blobs are gathered below.
            pass
        elif sd_hash:
            # The docstring promises the sd blob itself is announced too.
            blob_hashes.append(sd_hash)
            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
        else:
            raise Exception("single argument must be specified")
        if not blob_hash:
            # Announce every verified blob belonging to the stream.
            blobs = yield self.storage.get_blobs_for_stream(stream_hash)
            blob_hashes.extend(
                [blob.blob_hash for blob in blobs if blob.get_is_verified()]
            )
        yield self.session.blob_manager._immediate_announce(blob_hashes)
    response = yield self._render_response(True)
    defer.returnValue(response)
|
def jsonrpc_blob_announce(
    self, announce_all=None, blob_hash=None, stream_hash=None, sd_hash=None
):
    """
    Announce blobs to the DHT
    Usage:
        blob_announce [-a] [<blob_hash> | --blob_hash=<blob_hash>]
                      [<stream_hash> | --stream_hash=<stream_hash>]
                      [<sd_hash> | --sd_hash=<sd_hash>]
    Options:
        -a                                          : announce all the blobs possessed by user
        <blob_hash>, --blob_hash=<blob_hash>        : announce a blob, specified by blob_hash
        <stream_hash>, --stream_hash=<stream_hash>  : announce all blobs associated with
                                                        stream_hash
        <sd_hash>, --sd_hash=<sd_hash>              : announce all blobs associated with
                                                        sd_hash and the sd_hash itself
    Returns:
        (bool) true if successful
    """
    if announce_all:
        # Announce everything the local blob manager holds.
        yield self.session.blob_manager.immediate_announce_all_blobs()
    else:
        # Exactly one of blob_hash / stream_hash / sd_hash must be given;
        # each branch resolves to a list of verified blob hashes.
        if blob_hash:
            blob_hashes = [blob_hash]
        elif stream_hash:
            blobs = yield self.get_blobs_for_stream_hash(stream_hash)
            blob_hashes = [blob.blob_hash for blob in blobs if blob.get_is_verified()]
        elif sd_hash:
            # The sd blob itself is announced along with the stream's blobs.
            blobs = yield self.get_blobs_for_sd_hash(sd_hash)
            blob_hashes = [sd_hash] + [
                blob.blob_hash for blob in blobs if blob.get_is_verified()
            ]
        else:
            raise Exception("single argument must be specified")
        yield self.session.blob_manager._immediate_announce(blob_hashes)
    response = yield self._render_response(True)
    defer.returnValue(response)
|
https://github.com/lbryio/lbry-sdk/issues/895
|
Here's the traceback for the error you encountered:
Traceback (most recent call last):
File "site-packages\twisted\internet\defer.py", line 457, in callback
File "site-packages\twisted\internet\defer.py", line 565, in _startRunCallbacks
File "site-packages\twisted\internet\defer.py", line 651, in _runCallbacks
File "site-packages\twisted\internet\defer.py", line 1355, in gotResult
--- <exception caught here> ---
File "site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
File "site-packages\lbrynet\daemon\Daemon.py", line 2536, in jsonrpc_blob_list
File "site-packages\lbrynet\core\utils.py", line 125, in get_sd_hash
exceptions.KeyError: 'stream'
|
exceptions.KeyError
|
def jsonrpc_blob_list(
    self,
    uri=None,
    stream_hash=None,
    sd_hash=None,
    needed=None,
    finished=None,
    page_size=None,
    page=None,
):
    """
    Returns blob hashes. If not given filters, returns all blobs known by the blob manager
    Usage:
        blob_list [-n] [-f] [<uri> | --uri=<uri>] [<stream_hash> | --stream_hash=<stream_hash>]
                  [<sd_hash> | --sd_hash=<sd_hash>] [<page_size> | --page_size=<page_size>]
                  [<page> | --page=<page>]
    Options:
        -n                                          : only return needed blobs
        -f                                          : only return finished blobs
        <uri>, --uri=<uri>                          : filter blobs by stream in a uri
        <stream_hash>, --stream_hash=<stream_hash>  : filter blobs by stream hash
        <sd_hash>, --sd_hash=<sd_hash>              : filter blobs by sd hash
        <page_size>, --page_size=<page_size>        : results page size
        <page>, --page=<page>                       : page of results to return
    Returns:
        (list) List of blob hashes
    """
    if uri or stream_hash or sd_hash:
        # Normalize whichever filter was given into (sd_hash, stream_hash).
        if uri:
            metadata = yield self._resolve_name(uri)
            sd_hash = utils.get_sd_hash(metadata)
            stream_hash = yield self.session.storage.get_stream_hash_for_sd_hash(
                sd_hash
            )
        elif stream_hash:
            sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(
                stream_hash
            )
        elif sd_hash:
            # Round-trip through storage so a stale/unknown sd_hash yields
            # consistent (possibly None) values for both hashes.
            stream_hash = yield self.session.storage.get_stream_hash_for_sd_hash(
                sd_hash
            )
            sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(
                stream_hash
            )
        if stream_hash:
            blobs = yield self.session.storage.get_blobs_for_stream(stream_hash)
        else:
            blobs = []
        # get_blobs_for_stream does not include the sd blob, so we'll add it manually
        if sd_hash in self.session.blob_manager.blobs:
            blobs = [self.session.blob_manager.blobs[sd_hash]] + blobs
    else:
        # No filter: every blob the blob manager knows about (Python 2 API).
        blobs = self.session.blob_manager.blobs.itervalues()
    if needed:
        blobs = [blob for blob in blobs if not blob.get_is_verified()]
    if finished:
        blobs = [blob for blob in blobs if blob.get_is_verified()]
    # Drop entries without a hash (e.g. the stream's terminating blob).
    blob_hashes = [blob.blob_hash for blob in blobs if blob.blob_hash]
    # Default page_size to "everything" and page to the first page.
    page_size = page_size or len(blob_hashes)
    page = page or 0
    start_index = page * page_size
    stop_index = start_index + page_size
    blob_hashes_for_return = blob_hashes[start_index:stop_index]
    response = yield self._render_response(blob_hashes_for_return)
    defer.returnValue(response)
|
def jsonrpc_blob_list(
    self,
    uri=None,
    stream_hash=None,
    sd_hash=None,
    needed=None,
    finished=None,
    page_size=None,
    page=None,
):
    """
    Returns blob hashes. If not given filters, returns all blobs known by the blob manager
    Usage:
        blob_list [-n] [-f] [<uri> | --uri=<uri>] [<stream_hash> | --stream_hash=<stream_hash>]
                  [<sd_hash> | --sd_hash=<sd_hash>] [<page_size> | --page_size=<page_size>]
                  [<page> | --page=<page>]
    Options:
        -n                                          : only return needed blobs
        -f                                          : only return finished blobs
        <uri>, --uri=<uri>                          : filter blobs by stream in a uri
        <stream_hash>, --stream_hash=<stream_hash>  : filter blobs by stream hash
        <sd_hash>, --sd_hash=<sd_hash>              : filter blobs by sd hash
        <page_size>, --page_size=<page_size>        : results page size
        <page>, --page=<page>                       : page of results to return
    Returns:
        (list) List of blob hashes
    """
    if uri:
        metadata = yield self._resolve_name(uri)
        # NOTE(review): get_sd_hash raises KeyError ('stream') for metadata
        # without a stream section -- that raise is not caught here; verify
        # against the caller. Only NoSuchSDHash is handled below.
        sd_hash = utils.get_sd_hash(metadata)
        try:
            blobs = yield self.get_blobs_for_sd_hash(sd_hash)
        except NoSuchSDHash:
            blobs = []
    elif stream_hash:
        try:
            blobs = yield self.get_blobs_for_stream_hash(stream_hash)
        except NoSuchStreamHash:
            blobs = []
    elif sd_hash:
        try:
            blobs = yield self.get_blobs_for_sd_hash(sd_hash)
        except NoSuchSDHash:
            blobs = []
    else:
        # No filter: every blob the blob manager knows about (Python 2 API).
        blobs = self.session.blob_manager.blobs.itervalues()
    if needed:
        blobs = [blob for blob in blobs if not blob.get_is_verified()]
    if finished:
        blobs = [blob for blob in blobs if blob.get_is_verified()]
    blob_hashes = [blob.blob_hash for blob in blobs]
    # Default page_size to "everything" and page to the first page.
    page_size = page_size or len(blob_hashes)
    page = page or 0
    start_index = page * page_size
    stop_index = start_index + page_size
    blob_hashes_for_return = blob_hashes[start_index:stop_index]
    response = yield self._render_response(blob_hashes_for_return)
    defer.returnValue(response)
|
https://github.com/lbryio/lbry-sdk/issues/895
|
Here's the traceback for the error you encountered:
Traceback (most recent call last):
File "site-packages\twisted\internet\defer.py", line 457, in callback
File "site-packages\twisted\internet\defer.py", line 565, in _startRunCallbacks
File "site-packages\twisted\internet\defer.py", line 651, in _runCallbacks
File "site-packages\twisted\internet\defer.py", line 1355, in gotResult
--- <exception caught here> ---
File "site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
File "site-packages\lbrynet\daemon\Daemon.py", line 2536, in jsonrpc_blob_list
File "site-packages\lbrynet\core\utils.py", line 125, in get_sd_hash
exceptions.KeyError: 'stream'
|
exceptions.KeyError
|
def stop(self):
    """Stop creating the stream. Create the terminating zero-length blob."""
    log.debug("stop has been called for StreamCreator")
    self.stopped = True
    if self.current_blob is not None:
        self._close_current_blob()
    self._finalize()
    # Fire _finished() only after every outstanding blob deferred settles.
    done = defer.DeferredList(self.finished_deferreds)
    done.addCallback(lambda _ignored: self._finished())
    return done
|
def stop(self):
    """Stop creating the stream. Create the terminating zero-length blob."""
    log.debug("stop has been called for StreamCreator")
    self.stopped = True
    if self.current_blob is not None:
        self._close_current_blob()
    self._finalize()
    # Fire _finished() only after every outstanding blob deferred settles;
    # route any failure from the chain into the _error handler.
    pending = defer.DeferredList(self.finished_deferreds)
    pending.addCallback(lambda _ignored: self._finished())
    pending.addErrback(self._error)
    return pending
|
https://github.com/lbryio/lbry-sdk/issues/905
|
2017-09-19 09:47:51,891 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://battlefront-ii-storytrailer (5c3d3c) is finished
2017-09-19 09:47:51,904 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://hitmans-bodyguard-trailer (3303b0) is running
2017-09-19 09:47:51,914 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://thelovewitch-trailer (59edac) is finished
2017-09-19 09:47:51,964 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://bellflower-trailer (618fbb) is finished
2017-09-19 09:47:51,986 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://itsadisaster-threesome (395066) is finished
2017-09-19 09:47:51,987 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://jinglebellrocks-trailer (224aa1) is finished
2017-09-19 09:47:52,009 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://pub (57fdb5) is finished
2017-09-19 09:47:52,025 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://lbryscrreen1 (2dab0b) is finished
2017-09-19 09:47:52,072 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://lbryscrreen1 (2dab0b) is finished
2017-09-19 09:47:52,082 INFO lbrynet.core.client.ConnectionManager:42: Connection Manager The Hitman’s Bodyguard - Redband Trailer (2017).mp4 initialized
2017-09-19 09:47:52,084 INFO lbrynet.core.client.ConnectionManager:42: Connection Manager 1 - 'Fear the Boom and Bust' - Keynes vs. Hayek Rap Battle.mp4 initialized
2017-09-19 09:47:52,099 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://kedi-trailer (5731e9) is stopped
2017-09-19 09:47:52,114 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://kedi-trailer (5731e9) is stopped
2017-09-19 09:47:52,142 CRITICAL twisted:154: Unhandled error in Deferred:
2017-09-19 09:47:52,144 CRITICAL twisted:154:
Traceback (most recent call last):
File "site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
File "site-packages\lbrynet\core\client\BlobRequester.py", line 70, in get_new_peers_for_head_blob
File "site-packages\lbrynet\core\client\DownloadManager.py", line 103, in get_head_blob_hash
KeyError: 0
2017-09-19 09:47:52,150 CRITICAL twisted:154: Unhandled error in Deferred:
2017-09-19 09:47:52,150 CRITICAL twisted:154:
Traceback (most recent call last):
File "site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
File "site-packages\lbrynet\core\client\BlobRequester.py", line 70, in get_new_peers_for_head_blob
File "site-packages\lbrynet\core\client\DownloadManager.py", line 103, in get_head_blob_hash
KeyError: 0
|
KeyError
|
def _get_new_peers(self):
    """Pick peers to open new connections to for this stream.

    Twisted ``inlineCallbacks``-style generator: yields deferreds from the
    primary request creator and hands the final peer list back through
    ``defer.returnValue``.
    """
    slots_left = self.max_connections_per_stream - len(self._peer_connections)
    if slots_left < 1:
        defer.returnValue([])
    # Peers always come from the first request creator, which must be a
    # BlobRequester.
    requester = self._primary_request_creators[0]
    log.debug("%s Trying to get a new peer to connect to", self._get_log_name())
    peers = []
    # Look for peers serving the head blob first, when configured to do so.
    if self.seek_head_blob_first:
        try:
            candidates = yield requester.get_new_peers_for_head_blob()
            peers = self.return_shuffled_peers_not_connected_to(candidates, slots_left)
        except KeyError:
            # The head blob is not known yet; fall through to the
            # unavailable-blob search below.
            log.warning("%s does not have a head blob", self._get_log_name())
    # No peers found via the head blob (or head-blob seeking disabled):
    # fall back to the first unavailable blob.
    if not peers:
        candidates = yield requester.get_new_peers_for_next_unavailable()
        peers = self.return_shuffled_peers_not_connected_to(candidates, slots_left)
    log.debug("%s Got a list of peers to choose from: %s", self._get_log_name(), peers)
    log.debug(
        "%s Current connections: %s",
        self._get_log_name(),
        self._peer_connections.keys(),
    )
    log.debug(
        "%s List of connection states: %s",
        self._get_log_name(),
        [p_c_h.connection.state for p_c_h in self._peer_connections.values()],
    )
    defer.returnValue(peers)
|
def _get_new_peers(self):
    """Pick peers to open new connections to for this stream.

    Twisted ``inlineCallbacks``-style generator. Yields deferreds from the
    primary request creator and returns the peer list via
    ``defer.returnValue``.
    """
    new_conns_needed = self.max_connections_per_stream - len(self._peer_connections)
    if new_conns_needed < 1:
        defer.returnValue([])
    # we always get the peer from the first request creator
    # must be a type BlobRequester...
    request_creator = self._primary_request_creators[0]
    log.debug("%s Trying to get a new peer to connect to", self._get_log_name())
    # find peers for the head blob if configured to do so
    if self.seek_head_blob_first:
        try:
            peers = yield request_creator.get_new_peers_for_head_blob()
            peers = self.return_shuffled_peers_not_connected_to(peers, new_conns_needed)
        except KeyError:
            # The head blob may not be known yet. Without this guard the
            # KeyError escaped the deferred ("Unhandled error in Deferred",
            # lbry-sdk#905); fall through to the unavailable-blob search.
            log.warning("%s does not have a head blob", self._get_log_name())
            peers = []
    else:
        peers = []
    # we didn't find any new peers on the head blob,
    # we have to look for the first unavailable blob
    if not peers:
        peers = yield request_creator.get_new_peers_for_next_unavailable()
        peers = self.return_shuffled_peers_not_connected_to(peers, new_conns_needed)
    log.debug("%s Got a list of peers to choose from: %s", self._get_log_name(), peers)
    log.debug(
        "%s Current connections: %s",
        self._get_log_name(),
        self._peer_connections.keys(),
    )
    log.debug(
        "%s List of connection states: %s",
        self._get_log_name(),
        [p_c_h.connection.state for p_c_h in self._peer_connections.values()],
    )
    defer.returnValue(peers)
|
https://github.com/lbryio/lbry-sdk/issues/905
|
2017-09-19 09:47:51,891 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://battlefront-ii-storytrailer (5c3d3c) is finished
2017-09-19 09:47:51,904 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://hitmans-bodyguard-trailer (3303b0) is running
2017-09-19 09:47:51,914 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://thelovewitch-trailer (59edac) is finished
2017-09-19 09:47:51,964 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://bellflower-trailer (618fbb) is finished
2017-09-19 09:47:51,986 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://itsadisaster-threesome (395066) is finished
2017-09-19 09:47:51,987 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://jinglebellrocks-trailer (224aa1) is finished
2017-09-19 09:47:52,009 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://pub (57fdb5) is finished
2017-09-19 09:47:52,025 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://lbryscrreen1 (2dab0b) is finished
2017-09-19 09:47:52,072 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://lbryscrreen1 (2dab0b) is finished
2017-09-19 09:47:52,082 INFO lbrynet.core.client.ConnectionManager:42: Connection Manager The Hitman’s Bodyguard - Redband Trailer (2017).mp4 initialized
2017-09-19 09:47:52,084 INFO lbrynet.core.client.ConnectionManager:42: Connection Manager 1 - 'Fear the Boom and Bust' - Keynes vs. Hayek Rap Battle.mp4 initialized
2017-09-19 09:47:52,099 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://kedi-trailer (5731e9) is stopped
2017-09-19 09:47:52,114 INFO lbrynet.file_manager.EncryptedFileDownloader:32: lbry://kedi-trailer (5731e9) is stopped
2017-09-19 09:47:52,142 CRITICAL twisted:154: Unhandled error in Deferred:
2017-09-19 09:47:52,144 CRITICAL twisted:154:
Traceback (most recent call last):
File "site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
File "site-packages\lbrynet\core\client\BlobRequester.py", line 70, in get_new_peers_for_head_blob
File "site-packages\lbrynet\core\client\DownloadManager.py", line 103, in get_head_blob_hash
KeyError: 0
2017-09-19 09:47:52,150 CRITICAL twisted:154: Unhandled error in Deferred:
2017-09-19 09:47:52,150 CRITICAL twisted:154:
Traceback (most recent call last):
File "site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
File "site-packages\lbrynet\core\client\BlobRequester.py", line 70, in get_new_peers_for_head_blob
File "site-packages\lbrynet\core\client\DownloadManager.py", line 103, in get_head_blob_hash
KeyError: 0
|
KeyError
|
def check_connection(server="lbry.io", port=80, timeout=2, bypass_dns=False):
    """Attempts to open a socket to server:port and returns True if successful.

    :param server: hostname (or IP address, with ``bypass_dns=True``) to probe.
    :param port: TCP port to probe.
    :param timeout: connect timeout in seconds.
    :param bypass_dns: skip resolving ``server`` before connecting.

    If the hostname cannot be resolved, falls back to probing a public DNS
    server (8.8.8.8:53) so a broken resolver is not mistaken for a dead
    internet connection.
    """
    log.debug("Checking connection to %s:%s", server, port)
    try:
        if not bypass_dns:
            server = socket.gethostbyname(server)
        # create_connection() returns a live socket: close it so the probe
        # does not leak a file descriptor on every connectivity check.
        socket.create_connection((server, port), timeout).close()
        log.debug("Connection successful")
        return True
    except (socket.gaierror, socket.herror):
        log.info(
            "Failed to connect to %s:%s. Unable to resolve domain. Trying to bypass DNS",
            server,
            port,
            exc_info=True,
        )
        try:
            server = "8.8.8.8"
            port = 53
            socket.create_connection((server, port), timeout).close()
            log.debug("Connection successful")
            return True
        except Exception:
            log.info(
                "Failed to connect to %s:%s. Maybe the internet connection is not working",
                server,
                port,
                exc_info=True,
            )
            return False
    except Exception:
        log.info(
            "Failed to connect to %s:%s. Maybe the internet connection is not working",
            server,
            port,
            exc_info=True,
        )
        return False
|
def check_connection(server="lbry.io", port=80):
    """Attempts to open a socket to server:port and returns True if successful.

    :param server: hostname to resolve and probe.
    :param port: TCP port to probe.
    """
    try:
        log.debug("Checking connection to %s:%s", server, port)
        host = socket.gethostbyname(server)
        # create_connection() returns a live socket; close it so repeated
        # connectivity checks do not leak file descriptors.
        s = socket.create_connection((host, port), 2)
        s.close()
        log.debug("Connection successful")
        return True
    except Exception:
        # Broad catch is deliberate: any failure (DNS, refused, timeout)
        # simply means "not connected".
        log.info(
            "Failed to connect to %s:%s. Maybe the internet connection is not working",
            server,
            port,
            exc_info=True,
        )
        return False
|
https://github.com/lbryio/lbry-sdk/issues/908
|
$ /opt/lbry/lbrynet-daemon --version
{
"build": "release",
"installation_id": "5GfsGqN89ysnaQiorZZNg3P3vDbc5WWJ75FbNDEiCoZXpzE4beznwMTHbNPYLww8gA",
"lbrynet_version": "0.16.0",
"lbryschema_version": "0.0.11",
"lbryum_version": "3.1.7",
"os_release": "4.4.0-93-generic",
"os_system": "Linux",
"platform": "Linux-4.4.0-93-generic-x86_64-with-Ubuntu-16.04-xenial",
"processor": "x86_64",
"python_version": "2.7.12"
}
$ /opt/lbry/lbrynet-daemon
2017-09-20 13:40:34,369 INFO lbrynet.daemon.DaemonControl:70: Starting lbrynet-daemon from command line
2017-09-20 13:40:34,373 INFO lbrynet.core.utils:108: Failed to connect to lbry.io:80. Maybe the internet connection is not working
Traceback (most recent call last):
File "site-packages/lbrynet/core/utils.py", line 101, in check_connection
gaierror: [Errno -2] Name or service not known
2017-09-20 13:40:34,374 INFO lbrynet.daemon.DaemonControl:77: Not connected to internet, unable to start
|
gaierror
|
def create_key_bindings(history, python_input, history_mapping):
    """
    Key bindings for the history browser.

    Wires up: selecting/deselecting history lines, removing lines from the
    result pane, switching between panes, toggling help, accepting or
    cancelling the selection, and suspend-to-background.

    :param history: the history browser container (buffers/layout owner).
    :param python_input: `PythonInput` instance (mode and feature flags).
    :param history_mapping: mapping between history lines and result lines.
    :returns: a `KeyBindings` registry.
    """
    bindings = KeyBindings()
    handle = bindings.add
    @handle(" ", filter=has_focus(history.history_buffer))
    def _(event):
        """
        Space: select/deselect line from history pane.
        """
        b = event.current_buffer
        line_no = b.document.cursor_position_row
        if not history_mapping.history_lines:
            # If we've no history, then nothing to do
            return
        if line_no in history_mapping.selected_lines:
            # Remove line.
            history_mapping.selected_lines.remove(line_no)
            history_mapping.update_default_buffer()
        else:
            # Add line.
            history_mapping.selected_lines.add(line_no)
            history_mapping.update_default_buffer()
            # Update cursor position
            default_buffer = history.default_buffer
            default_lineno = (
                sorted(history_mapping.selected_lines).index(line_no)
                + history_mapping.result_line_offset
            )
            default_buffer.cursor_position = (
                default_buffer.document.translate_row_col_to_index(default_lineno, 0)
            )
        # Also move the cursor to the next line. (This way they can hold
        # space to select a region.)
        b.cursor_position = b.document.translate_row_col_to_index(line_no + 1, 0)
    @handle(" ", filter=has_focus(DEFAULT_BUFFER))
    @handle("delete", filter=has_focus(DEFAULT_BUFFER))
    @handle("c-h", filter=has_focus(DEFAULT_BUFFER))
    def _(event):
        """
        Space: remove line from default pane.
        """
        b = event.current_buffer
        line_no = b.document.cursor_position_row - history_mapping.result_line_offset
        if line_no >= 0:
            try:
                history_lineno = sorted(history_mapping.selected_lines)[line_no]
            except IndexError:
                pass # When `selected_lines` is an empty set.
            else:
                history_mapping.selected_lines.remove(history_lineno)
                history_mapping.update_default_buffer()
    help_focussed = has_focus(history.help_buffer)
    main_buffer_focussed = has_focus(history.history_buffer) | has_focus(
        history.default_buffer
    )
    @handle("tab", filter=main_buffer_focussed)
    @handle("c-x", filter=main_buffer_focussed, eager=True)
    # Eager: ignore the Emacs [Ctrl-X Ctrl-X] binding.
    @handle("c-w", filter=main_buffer_focussed)
    def _(event):
        "Select other window."
        _select_other_window(history)
    @handle("f4")
    def _(event):
        "Switch between Emacs/Vi mode."
        python_input.vi_mode = not python_input.vi_mode
    @handle("f1")
    def _(event):
        "Display/hide help."
        _toggle_help(history)
    @handle("enter", filter=help_focussed)
    @handle("c-c", filter=help_focussed)
    @handle("c-g", filter=help_focussed)
    @handle("escape", filter=help_focussed)
    def _(event):
        "Leave help."
        event.app.layout.focus_previous()
    @handle("q", filter=main_buffer_focussed)
    @handle("f3", filter=main_buffer_focussed)
    @handle("c-c", filter=main_buffer_focussed)
    @handle("c-g", filter=main_buffer_focussed)
    def _(event):
        "Cancel and go back."
        event.app.exit(result=None)
    @handle("enter", filter=main_buffer_focussed)
    def _(event):
        "Accept input."
        event.app.exit(result=history.default_buffer.text)
    enable_system_bindings = Condition(lambda: python_input.enable_system_bindings)
    @handle("c-z", filter=enable_system_bindings)
    def _(event):
        "Suspend to background."
        event.app.suspend_to_background()
    return bindings
|
def create_key_bindings(history, python_input, history_mapping):
    """
    Key bindings for the history browser.

    :param history: the history browser container (buffers/layout owner).
    :param python_input: `PythonInput` instance (mode and feature flags).
    :param history_mapping: mapping between history lines and result lines.
    :returns: a `KeyBindings` registry.
    """
    bindings = KeyBindings()
    handle = bindings.add
    @handle(" ", filter=has_focus(history.history_buffer))
    def _(event):
        """
        Space: select/deselect line from history pane.
        """
        b = event.current_buffer
        line_no = b.document.cursor_position_row
        if not history_mapping.history_lines:
            # If we've no history, there is nothing to select. Without this
            # guard toggling a line raised IndexError from
            # `history_mapping.get_new_document` (ptpython#214).
            return
        if line_no in history_mapping.selected_lines:
            # Remove line.
            history_mapping.selected_lines.remove(line_no)
            history_mapping.update_default_buffer()
        else:
            # Add line.
            history_mapping.selected_lines.add(line_no)
            history_mapping.update_default_buffer()
            # Update cursor position
            default_buffer = history.default_buffer
            default_lineno = (
                sorted(history_mapping.selected_lines).index(line_no)
                + history_mapping.result_line_offset
            )
            default_buffer.cursor_position = (
                default_buffer.document.translate_row_col_to_index(default_lineno, 0)
            )
        # Also move the cursor to the next line. (This way they can hold
        # space to select a region.)
        b.cursor_position = b.document.translate_row_col_to_index(line_no + 1, 0)
    @handle(" ", filter=has_focus(DEFAULT_BUFFER))
    @handle("delete", filter=has_focus(DEFAULT_BUFFER))
    @handle("c-h", filter=has_focus(DEFAULT_BUFFER))
    def _(event):
        """
        Space: remove line from default pane.
        """
        b = event.current_buffer
        line_no = b.document.cursor_position_row - history_mapping.result_line_offset
        if line_no >= 0:
            try:
                history_lineno = sorted(history_mapping.selected_lines)[line_no]
            except IndexError:
                pass # When `selected_lines` is an empty set.
            else:
                history_mapping.selected_lines.remove(history_lineno)
                history_mapping.update_default_buffer()
    help_focussed = has_focus(history.help_buffer)
    main_buffer_focussed = has_focus(history.history_buffer) | has_focus(
        history.default_buffer
    )
    @handle("tab", filter=main_buffer_focussed)
    @handle("c-x", filter=main_buffer_focussed, eager=True)
    # Eager: ignore the Emacs [Ctrl-X Ctrl-X] binding.
    @handle("c-w", filter=main_buffer_focussed)
    def _(event):
        "Select other window."
        _select_other_window(history)
    @handle("f4")
    def _(event):
        "Switch between Emacs/Vi mode."
        python_input.vi_mode = not python_input.vi_mode
    @handle("f1")
    def _(event):
        "Display/hide help."
        _toggle_help(history)
    @handle("enter", filter=help_focussed)
    @handle("c-c", filter=help_focussed)
    @handle("c-g", filter=help_focussed)
    @handle("escape", filter=help_focussed)
    def _(event):
        "Leave help."
        event.app.layout.focus_previous()
    @handle("q", filter=main_buffer_focussed)
    @handle("f3", filter=main_buffer_focussed)
    @handle("c-c", filter=main_buffer_focussed)
    @handle("c-g", filter=main_buffer_focussed)
    def _(event):
        "Cancel and go back."
        event.app.exit(result=None)
    @handle("enter", filter=main_buffer_focussed)
    def _(event):
        "Accept input."
        event.app.exit(result=history.default_buffer.text)
    enable_system_bindings = Condition(lambda: python_input.enable_system_bindings)
    @handle("c-z", filter=enable_system_bindings)
    def _(event):
        "Suspend to background."
        event.app.suspend_to_background()
    return bindings
|
https://github.com/prompt-toolkit/ptpython/issues/214
|
Traceback (most recent call last): │
File "/home/me/.local/bin/ptpython3", line 11, in <module> │
sys.exit(run()) │
File "/home/me/.local/lib/python3.6/site-packages/ptpython/entry_points/run_ptpython.py", line 74, in run
title='Python REPL (ptpython)') │
File "/home/me/.local/lib/python3.6/site-packages/ptpython/repl.py", line 320, in embed │
cli.run() │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 415, in run
self.eventloop.run(self.input, self.create_eventloop_callbacks()) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/eventloop/posix.py", line 159, in run
t() │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/eventloop/posix.py", line 82, in read_from_stdin
inputstream.feed(data) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/terminal/vt100_input.py", line 398, in feed
self._input_parser.send(c) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/terminal/vt100_input.py", line 325, in _input_parser_generator
self._call_handler(prefix[0], prefix[0]) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/terminal/vt100_input.py", line 340, in _call_handler
self.feed_key_callback(KeyPress(key, insert_text)) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 1048, in feed_key
cli.input_processor.process_keys() │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/key_binding/input_processor.py", line 219, in process_keys
self._process_coroutine.send(key_press) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/key_binding/input_processor.py", line 176, in _process
self._call_handler(matches[-1], key_sequence=buffer[:]) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/key_binding/input_processor.py", line 247, in _call_handler
handler.call(event) │
File "/home/me/.local/lib/python3.6/site-packages/prompt_toolkit/key_binding/registry.py", line 61, in call
return self.handler(event) │
File "/home/me/.local/lib/python3.6/site-packages/ptpython/history_browser.py", line 443, in _
history_mapping.update_default_buffer(event.cli) │
File "/home/me/.local/lib/python3.6/site-packages/ptpython/history_browser.py", line 399, in update_default_buffer
self.get_new_document(b.cursor_position), bypass_readonly=True) │
File "/home/me/.local/lib/python3.6/site-packages/ptpython/history_browser.py", line 383, in get_new_document
lines.append(self.history_lines[line_no]) │
IndexError: list index out of range │
|
IndexError
|
def run(user_ns=None):
    """
    Entry point for ptipython: run a given script, or start the IPython REPL.

    :param user_ns: optional namespace dict for the interactive shell; a
        fresh empty dict is created when None.
    """
    a = docopt.docopt(__doc__)
    vi_mode = bool(a["--vi"])
    config_dir = os.path.expanduser(a["--config-dir"] or "~/.ptpython/")
    # Create config directory.
    if not os.path.isdir(config_dir):
        os.mkdir(config_dir)
    # If IPython is not available, show message and exit here with error status
    # code.
    try:
        import IPython
    except ImportError:
        print("IPython not found. Please install IPython (pip install ipython).")
        sys.exit(1)
    else:
        from ptpython.ipython import embed
        from ptpython.repl import run_config, enable_deprecation_warnings
    # Add the current directory to `sys.path`.
    if sys.path[0] != "":
        sys.path.insert(0, "")
    # When a file has been given, run that, otherwise start the shell.
    if a["<arg>"] and not a["--interactive"]:
        sys.argv = a["<arg>"]
        path = a["<arg>"][0]
        # Binary mode: compile() decodes the source itself (PEP 263).
        with open(path, "rb") as f:
            code = compile(f.read(), path, "exec")
            six.exec_(code)
    else:
        enable_deprecation_warnings()
        # Create an empty namespace for this interactive shell. (If we don't do
        # that, all the variables from this function will become available in
        # the IPython shell.)
        if user_ns is None:
            user_ns = {}
        # Startup path
        startup_paths = []
        if "PYTHONSTARTUP" in os.environ:
            startup_paths.append(os.environ["PYTHONSTARTUP"])
        # --interactive
        if a["--interactive"]:
            startup_paths.append(a["--interactive"])
            sys.argv = [a["--interactive"]] + a["<arg>"]
        # exec scripts from startup paths
        for path in startup_paths:
            if os.path.exists(path):
                with open(path, "rb") as f:
                    code = compile(f.read(), path, "exec")
                six.exec_(code, user_ns, user_ns)
            else:
                # A missing startup file is fatal: exit with an error.
                print("File not found: {}\n\n".format(path))
                sys.exit(1)
        # Apply config file
        def configure(repl):
            path = os.path.join(config_dir, "config.py")
            if os.path.exists(path):
                run_config(repl, path)
        # Run interactive shell.
        embed(
            vi_mode=vi_mode,
            history_filename=os.path.join(config_dir, "history"),
            configure=configure,
            user_ns=user_ns,
            title="IPython REPL (ptipython)",
        )
|
def run(user_ns=None):
    """
    Entry point for ptipython: run a given script, or start the IPython REPL.

    :param user_ns: optional namespace dict for the interactive shell; a
        fresh empty dict is created when None.
    """
    a = docopt.docopt(__doc__)
    vi_mode = bool(a["--vi"])
    config_dir = os.path.expanduser(a["--config-dir"] or "~/.ptpython/")
    # Create config directory.
    if not os.path.isdir(config_dir):
        os.mkdir(config_dir)
    # If IPython is not available, show message and exit here with error status
    # code.
    try:
        import IPython
    except ImportError:
        print("IPython not found. Please install IPython (pip install ipython).")
        sys.exit(1)
    else:
        from ptpython.ipython import embed
        from ptpython.repl import run_config, enable_deprecation_warnings
    # Add the current directory to `sys.path`.
    if sys.path[0] != "":
        sys.path.insert(0, "")
    # When a file has been given, run that, otherwise start the shell.
    if a["<arg>"] and not a["--interactive"]:
        sys.argv = a["<arg>"]
        path = a["<arg>"][0]
        # Use a context manager so the file handle is closed deterministically
        # (the previous one-liner never closed it).
        with open(path, "rb") as f:
            code = compile(f.read(), path, "exec")
            six.exec_(code)
    else:
        enable_deprecation_warnings()
        # Create an empty namespace for this interactive shell. (If we don't do
        # that, all the variables from this function will become available in
        # the IPython shell.)
        if user_ns is None:
            user_ns = {}
        # Startup path
        startup_paths = []
        if "PYTHONSTARTUP" in os.environ:
            startup_paths.append(os.environ["PYTHONSTARTUP"])
        # --interactive
        if a["--interactive"]:
            startup_paths.append(a["--interactive"])
            sys.argv = [a["--interactive"]] + a["<arg>"]
        # exec scripts from startup paths
        for path in startup_paths:
            if os.path.exists(path):
                # Open in binary mode: compile() decodes the source itself,
                # honouring any PEP 263 coding declaration. Text mode used the
                # locale encoding (e.g. cp1252 on Windows) and crashed with
                # UnicodeDecodeError on UTF-8 files (ptpython#247).
                with open(path, "rb") as f:
                    code = compile(f.read(), path, "exec")
                six.exec_(code, user_ns, user_ns)
            else:
                print("File not found: {}\n\n".format(path))
                sys.exit(1)
        # Apply config file
        def configure(repl):
            path = os.path.join(config_dir, "config.py")
            if os.path.exists(path):
                run_config(repl, path)
        # Run interactive shell.
        embed(
            vi_mode=vi_mode,
            history_filename=os.path.join(config_dir, "history"),
            configure=configure,
            user_ns=user_ns,
            title="IPython REPL (ptipython)",
        )
|
https://github.com/prompt-toolkit/ptpython/issues/247
|
Traceback (most recent call last):
File "c:\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Python36\Scripts\ptpython.exe\__main__.py", line 9, in <module>
File "c:\python36\lib\site-packages\ptpython\entry_points\run_ptpython.py", line 74, in run
title='Python REPL (ptpython)')
File "c:\python36\lib\site-packages\ptpython\repl.py", line 320, in embed
cli.run()
File "c:\python36\lib\site-packages\prompt_toolkit\interface.py", line 404, in run
self.on_start.fire()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 60, in fire
self()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 56, in __call__
handler(self.sender)
File "c:\python36\lib\site-packages\ptpython\repl.py", line 62, in _on_start
code = compile(f.read(), path, 'exec')
File "c:\python36\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 2: character maps to <undefined>
|
UnicodeDecodeError
|
def run():
    """
    Entry point for ptpython: run a given script, or start the interactive
    REPL sharing `__main__`'s namespace.
    """
    a = docopt.docopt(__doc__)
    vi_mode = bool(a["--vi"])
    config_dir = os.path.expanduser(a["--config-dir"] or "~/.ptpython/")
    # Create config directory.
    if not os.path.isdir(config_dir):
        os.mkdir(config_dir)
    # Startup path
    startup_paths = []
    if "PYTHONSTARTUP" in os.environ:
        startup_paths.append(os.environ["PYTHONSTARTUP"])
    # --interactive
    if a["--interactive"]:
        startup_paths.append(a["--interactive"])
        sys.argv = [a["--interactive"]] + a["<arg>"]
    # Add the current directory to `sys.path`.
    if sys.path[0] != "":
        sys.path.insert(0, "")
    # When a file has been given, run that, otherwise start the shell.
    if a["<arg>"] and not a["--interactive"]:
        sys.argv = a["<arg>"]
        path = a["<arg>"][0]
        # Binary mode: compile() decodes the source itself (PEP 263).
        with open(path, "rb") as f:
            code = compile(f.read(), path, "exec")
            six.exec_(code)
    # Run interactive shell.
    else:
        enable_deprecation_warnings()
        # Apply config file
        def configure(repl):
            path = os.path.join(config_dir, "config.py")
            if os.path.exists(path):
                run_config(repl, path)
        import __main__
        embed(
            vi_mode=vi_mode,
            history_filename=os.path.join(config_dir, "history"),
            configure=configure,
            locals=__main__.__dict__,
            globals=__main__.__dict__,
            startup_paths=startup_paths,
            title="Python REPL (ptpython)",
        )
|
def run():
    """
    Entry point for ptpython: run a given script, or start the interactive
    REPL sharing `__main__`'s namespace.
    """
    a = docopt.docopt(__doc__)
    vi_mode = bool(a["--vi"])
    config_dir = os.path.expanduser(a["--config-dir"] or "~/.ptpython/")
    # Create config directory.
    if not os.path.isdir(config_dir):
        os.mkdir(config_dir)
    # Startup path
    startup_paths = []
    if "PYTHONSTARTUP" in os.environ:
        startup_paths.append(os.environ["PYTHONSTARTUP"])
    # --interactive
    if a["--interactive"]:
        startup_paths.append(a["--interactive"])
        sys.argv = [a["--interactive"]] + a["<arg>"]
    # Add the current directory to `sys.path`.
    if sys.path[0] != "":
        sys.path.insert(0, "")
    # When a file has been given, run that, otherwise start the shell.
    if a["<arg>"] and not a["--interactive"]:
        sys.argv = a["<arg>"]
        path = a["<arg>"][0]
        # Use a context manager so the file handle is closed deterministically
        # (the previous one-liner never closed it).
        with open(path, "rb") as f:
            code = compile(f.read(), path, "exec")
            six.exec_(code)
    # Run interactive shell.
    else:
        enable_deprecation_warnings()
        # Apply config file
        def configure(repl):
            path = os.path.join(config_dir, "config.py")
            if os.path.exists(path):
                run_config(repl, path)
        import __main__
        embed(
            vi_mode=vi_mode,
            history_filename=os.path.join(config_dir, "history"),
            configure=configure,
            locals=__main__.__dict__,
            globals=__main__.__dict__,
            startup_paths=startup_paths,
            title="Python REPL (ptpython)",
        )
|
https://github.com/prompt-toolkit/ptpython/issues/247
|
Traceback (most recent call last):
File "c:\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Python36\Scripts\ptpython.exe\__main__.py", line 9, in <module>
File "c:\python36\lib\site-packages\ptpython\entry_points\run_ptpython.py", line 74, in run
title='Python REPL (ptpython)')
File "c:\python36\lib\site-packages\ptpython\repl.py", line 320, in embed
cli.run()
File "c:\python36\lib\site-packages\prompt_toolkit\interface.py", line 404, in run
self.on_start.fire()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 60, in fire
self()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 56, in __call__
handler(self.sender)
File "c:\python36\lib\site-packages\ptpython\repl.py", line 62, in _on_start
code = compile(f.read(), path, 'exec')
File "c:\python36\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 2: character maps to <undefined>
|
UnicodeDecodeError
|
def _load_start_paths(self):
    "Start the Read-Eval-Print Loop."
    if not self._startup_paths:
        return
    for startup_path in self._startup_paths:
        if not os.path.exists(startup_path):
            # Warn (don't crash) when a startup file is missing.
            output = self.app.output
            output.write("WARNING | File not found: {}\n\n".format(startup_path))
            continue
        # Read as bytes; compile() decodes the source itself, honouring any
        # PEP 263 coding declaration.
        with open(startup_path, "rb") as handle:
            compiled = compile(handle.read(), startup_path, "exec")
            six.exec_(compiled, self.get_globals(), self.get_locals())
|
def _load_start_paths(self):
    "Start the Read-Eval-Print Loop."
    if self._startup_paths:
        for path in self._startup_paths:
            if os.path.exists(path):
                # Open in binary mode: compile() decodes the bytes itself,
                # honouring any PEP 263 coding declaration. Text mode used
                # the locale encoding (e.g. cp1252 on Windows) and crashed
                # with UnicodeDecodeError on UTF-8 files (ptpython#247).
                with open(path, "rb") as f:
                    code = compile(f.read(), path, "exec")
                    six.exec_(code, self.get_globals(), self.get_locals())
            else:
                output = self.app.output
                output.write("WARNING | File not found: {}\n\n".format(path))
|
https://github.com/prompt-toolkit/ptpython/issues/247
|
Traceback (most recent call last):
File "c:\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Python36\Scripts\ptpython.exe\__main__.py", line 9, in <module>
File "c:\python36\lib\site-packages\ptpython\entry_points\run_ptpython.py", line 74, in run
title='Python REPL (ptpython)')
File "c:\python36\lib\site-packages\ptpython\repl.py", line 320, in embed
cli.run()
File "c:\python36\lib\site-packages\prompt_toolkit\interface.py", line 404, in run
self.on_start.fire()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 60, in fire
self()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 56, in __call__
handler(self.sender)
File "c:\python36\lib\site-packages\ptpython\repl.py", line 62, in _on_start
code = compile(f.read(), path, 'exec')
File "c:\python36\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 2: character maps to <undefined>
|
UnicodeDecodeError
|
def run_config(repl, config_file="~/.ptpython/config.py"):
    """
    Execute REPL config file.
    :param repl: `PythonInput` instance.
    :param config_file: Path of the configuration file.
    """
    assert isinstance(repl, PythonInput)
    assert isinstance(config_file, six.text_type)
    # Expand tildes.
    config_file = os.path.expanduser(config_file)

    def enter_to_continue():
        six.moves.input("\nPress ENTER to continue...")

    # Bail out early when there is no config file to run.
    if not os.path.exists(config_file):
        print("Impossible to read %r" % config_file)
        enter_to_continue()
        return
    # Run the config file in an empty namespace; it is expected to define a
    # `configure(repl)` callable, which we then invoke with the repl.
    try:
        namespace = {}
        with open(config_file, "rb") as f:
            six.exec_(compile(f.read(), config_file, "exec"), namespace, namespace)
        if "configure" in namespace:
            namespace["configure"](repl)
    except Exception:
        # Show the error to the user and wait for acknowledgement instead of
        # crashing the REPL startup.
        traceback.print_exc()
        enter_to_continue()
|
def run_config(repl, config_file="~/.ptpython/config.py"):
    """
    Execute REPL config file.
    :param repl: `PythonInput` instance.
    :param config_file: Path of the configuration file.
    """
    assert isinstance(repl, PythonInput)
    assert isinstance(config_file, six.text_type)
    # Expand tildes.
    config_file = os.path.expanduser(config_file)
    def enter_to_continue():
        six.moves.input("\nPress ENTER to continue...")
    # Check whether this file exists.
    if not os.path.exists(config_file):
        print("Impossible to read %r" % config_file)
        enter_to_continue()
        return
    # Run the config file in an empty namespace.
    try:
        namespace = {}
        # Open in binary mode: compile() decodes the bytes itself, honouring
        # any PEP 263 coding declaration. Text mode used the locale encoding
        # (e.g. cp1252 on Windows) and crashed with UnicodeDecodeError on
        # UTF-8 config files (ptpython#247).
        with open(config_file, "rb") as f:
            code = compile(f.read(), config_file, "exec")
            six.exec_(code, namespace, namespace)
        # Now we should have a 'configure' method in this namespace. We call this
        # method with the repl as an argument.
        if "configure" in namespace:
            namespace["configure"](repl)
    except Exception:
        traceback.print_exc()
        enter_to_continue()
|
https://github.com/prompt-toolkit/ptpython/issues/247
|
Traceback (most recent call last):
File "c:\python36\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Python36\Scripts\ptpython.exe\__main__.py", line 9, in <module>
File "c:\python36\lib\site-packages\ptpython\entry_points\run_ptpython.py", line 74, in run
title='Python REPL (ptpython)')
File "c:\python36\lib\site-packages\ptpython\repl.py", line 320, in embed
cli.run()
File "c:\python36\lib\site-packages\prompt_toolkit\interface.py", line 404, in run
self.on_start.fire()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 60, in fire
self()
File "c:\python36\lib\site-packages\prompt_toolkit\utils.py", line 56, in __call__
handler(self.sender)
File "c:\python36\lib\site-packages\ptpython\repl.py", line 62, in _on_start
code = compile(f.read(), path, 'exec')
File "c:\python36\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 2: character maps to <undefined>
|
UnicodeDecodeError
|
def get_jedi_script_from_document(document, locals, globals):
    """Build a jedi Interpreter for `document`, or None when jedi fails."""
    # Keep this import in-line to improve start-up time: importing Jedi
    # is 'slow'.
    import jedi

    try:
        return jedi.Interpreter(
            document.text,
            column=document.cursor_position_col,
            line=document.cursor_position_row + 1,
            path="input-text",
            namespaces=[locals, globals],
        )
    except Exception:
        # Completion is best effort, so any jedi failure means "no script".
        # Known failure modes worked around here:
        #  - ValueError: invalid cursor position
        #    ('`column` parameter is not in a valid range.')
        #  - AttributeError: prompt-toolkit#65 / jedi#508.
        #  - IndexError: jedi#514.
        #  - KeyError: crash when the input is "u'" (start of a unicode string).
        #  - anything else: ptpython#91.
        return None
|
def get_jedi_script_from_document(document, locals, globals):
    """Build a jedi Interpreter for `document`, or None when jedi fails."""
    import jedi  # We keep this import in-line, to improve start-up time.
    # Importing Jedi is 'slow'.
    try:
        return jedi.Interpreter(
            document.text,
            column=document.cursor_position_col,
            line=document.cursor_position_row + 1,
            path="input-text",
            namespaces=[locals, globals],
        )
    except ValueError:
        # Invalid cursor position.
        # ValueError('`column` parameter is not in a valid range.')
        return None
    except AttributeError:
        # Workaround for #65: https://github.com/jonathanslenders/python-prompt-toolkit/issues/65
        # See also: https://github.com/davidhalter/jedi/issues/508
        return None
    except IndexError:
        # Workaround Jedi issue #514: for https://github.com/davidhalter/jedi/issues/514
        return None
    except KeyError:
        # Workaround for a crash when the input is "u'", the start of a unicode string.
        return None
    except Exception:
        # Catch-all: jedi can raise other errors (e.g. its ParseError) from the
        # completion thread; return no script instead of crashing.
        # Workaround for: https://github.com/jonathanslenders/ptpython/issues/91
        return None
https://github.com/prompt-toolkit/ptpython/issues/91
|
File "/home/jonathan/git/ptpython/ptpython/utils.py", line 52, t
namespaces=[locals, globals])
File "/home/jonathan/.virtualenvs/python35/lib/python3.5/site-p_
self._parser.module())
File "/home/jonathan/.virtualenvs/python35/lib/python3.5/site-pe
return self._parser().module
File "/home/jonathan/.virtualenvs/python35/lib/python3.5/site-pr
result = func(self)
File "/home/jonathan/.virtualenvs/python35/lib/python3.5/site-pr
parser = Parser(self._grammar, self._source, self._path)
File "/home/jonathan/.virtualenvs/python35/lib/python3.5/site-p_
self.module = p.parse(self._tokenize(tokenizer))
File "/home/jonathan/.virtualenvs/python35/lib/python3.5/site-pe
raise ParseError("incomplete input", type, value, start_pos)
jedi.parser.pgen2.parse.ParseError: incomplete input: type='ENDMA)
During handling of the above exception, another exception occurre:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 923, in _bootstrapr
self.run()
File "/usr/lib/python3.5/threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "/home/jonathan/git/python-prompt-toolkit/prompt_toolkit/in
completions = list(buffer.completer.get_completions(document,)
File "/home/jonathan/git/ptpython/ptpython/completer.py", line s
script = get_jedi_script_from_document(document, self.get_loc)
File "/home/jonathan/git/ptpython/ptpython/utils.py", line 67, t
except jedi.ParseError:
|
jedi.parser.pgen2.parse.ParseError
|
def signature_toolbar(python_input):
"""
Return the `Layout` for the signature.
"""
def get_tokens(cli):
result = []
append = result.append
Signature = Token.Toolbar.Signature
if python_input.signatures:
sig = python_input.signatures[0] # Always take the first one.
append((Signature, " "))
try:
append((Signature, sig.full_name))
except IndexError:
# Workaround for #37: https://github.com/jonathanslenders/python-prompt-toolkit/issues/37
# See also: https://github.com/davidhalter/jedi/issues/490
return []
append((Signature.Operator, "("))
try:
enumerated_params = enumerate(sig.params)
except AttributeError:
# Workaround for #136: https://github.com/jonathanslenders/ptpython/issues/136
# AttributeError: 'Lambda' object has no attribute 'get_subscope_by_name'
return []
for i, p in enumerated_params:
# Workaround for #47: 'p' is None when we hit the '*' in the signature.
# and sig has no 'index' attribute.
# See: https://github.com/jonathanslenders/ptpython/issues/47
# https://github.com/davidhalter/jedi/issues/598
description = p.description if p else "*" # or '*'
sig_index = getattr(sig, "index", 0)
if i == sig_index:
# Note: we use `_Param.description` instead of
# `_Param.name`, that way we also get the '*' before args.
append((Signature.CurrentName, str(description)))
else:
append((Signature, str(description)))
append((Signature.Operator, ", "))
if sig.params:
# Pop last comma
result.pop()
append((Signature.Operator, ")"))
append((Signature, " "))
return result
return ConditionalContainer(
content=Window(TokenListControl(get_tokens), height=LayoutDimension.exact(1)),
# Show only when there is a signature
filter=HasSignature(python_input)
&
# And there are no completions to be shown. (would cover signature pop-up.)
~(
HasCompletions()
& (
show_completions_menu(python_input)
| show_multi_column_completions_menu(python_input)
)
)
# Signature needs to be shown.
& ShowSignature(python_input)
&
# Not done yet.
~IsDone(),
)
|
def signature_toolbar(python_input):
"""
Return the `Layout` for the signature.
"""
def get_tokens(cli):
result = []
append = result.append
Signature = Token.Toolbar.Signature
if python_input.signatures:
sig = python_input.signatures[0] # Always take the first one.
append((Signature, " "))
try:
append((Signature, sig.full_name))
except IndexError:
# Workaround for #37: https://github.com/jonathanslenders/python-prompt-toolkit/issues/37
# See also: https://github.com/davidhalter/jedi/issues/490
return []
append((Signature.Operator, "("))
for i, p in enumerate(sig.params):
# Workaround for #47: 'p' is None when we hit the '*' in the signature.
# and sig has no 'index' attribute.
# See: https://github.com/jonathanslenders/ptpython/issues/47
# https://github.com/davidhalter/jedi/issues/598
description = p.description if p else "*" # or '*'
sig_index = getattr(sig, "index", 0)
if i == sig_index:
# Note: we use `_Param.description` instead of
# `_Param.name`, that way we also get the '*' before args.
append((Signature.CurrentName, str(description)))
else:
append((Signature, str(description)))
append((Signature.Operator, ", "))
if sig.params:
# Pop last comma
result.pop()
append((Signature.Operator, ")"))
append((Signature, " "))
return result
return ConditionalContainer(
content=Window(TokenListControl(get_tokens), height=LayoutDimension.exact(1)),
# Show only when there is a signature
filter=HasSignature(python_input)
&
# And there are no completions to be shown. (would cover signature pop-up.)
~(
HasCompletions()
& (
show_completions_menu(python_input)
| show_multi_column_completions_menu(python_input)
)
)
# Signature needs to be shown.
& ShowSignature(python_input)
&
# Not done yet.
~IsDone(),
)
|
https://github.com/prompt-toolkit/ptpython/issues/136
|
from lxml.html import builder as E
E.HTML(
Traceback (most recent call last):
File "/usr/lib/python3.5/site-packages/prompt_toolkit/cache.py", line 32, in get
return self._data[key]
KeyError: 51
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/ptpython", line 9, in <module>
load_entry_point('ptpython==0.35', 'console_scripts', 'ptpython')()
File "/usr/lib/python3.5/site-packages/ptpython/entry_points/run_ptpython.py", line 71, in run
title='Python REPL (ptpython)')
File "/usr/lib/python3.5/site-packages/ptpython/repl.py", line 315, in embed
cli.run()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 389, in run
self.eventloop.run(self.input, self.create_eventloop_callbacks())
File "/usr/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py", line 154, in run
t()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 326, in redraw
self._redraw()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 346, in _redraw
self.renderer.render(self, self.layout, is_done=self.is_done)
File "/usr/lib/python3.5/site-packages/prompt_toolkit/renderer.py", line 429, in render
extended_height=size.rows,
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 157, in write_to_screen
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 348, in write_to_screen
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, s, height))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 157, in write_to_screen
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 428, in write_to_screen
width = fl.content.preferred_width(cli, write_position.width).preferred
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 1644, in preferred_width
return self.content.preferred_width(cli, max_available_width)
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 980, in preferred_width
cli, max_available_width - total_margin_width)
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/controls.py", line 249, in preferred_width
text = token_list_to_text(self._get_tokens_cached(cli))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/controls.py", line 239, in _get_tokens_cached
cli.render_counter, lambda: self.get_tokens(cli))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/cache.py", line 35, in get
value = getter_func()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/controls.py", line 239, in <lambda>
cli.render_counter, lambda: self.get_tokens(cli))
File "/usr/lib/python3.5/site-packages/ptpython/layout.py", line 211, in get_tokens
for i, p in enumerate(sig.params):
File "/usr/lib/python3.5/site-packages/jedi/evaluate/cache.py", line 41, in wrapper
rv = function(obj, *args, **kwargs)
File "/usr/lib/python3.5/site-packages/jedi/api/classes.py", line 348, in params
sub = followed.get_subscope_by_name('__init__')
File "/usr/lib/python3.5/site-packages/jedi/evaluate/representation.py", line 563, in __getattr__
return getattr(self.base_func, name)
AttributeError: 'Lambda' object has no attribute 'get_subscope_by_name'
|
KeyError
|
def get_tokens(cli):
result = []
append = result.append
Signature = Token.Toolbar.Signature
if python_input.signatures:
sig = python_input.signatures[0] # Always take the first one.
append((Signature, " "))
try:
append((Signature, sig.full_name))
except IndexError:
# Workaround for #37: https://github.com/jonathanslenders/python-prompt-toolkit/issues/37
# See also: https://github.com/davidhalter/jedi/issues/490
return []
append((Signature.Operator, "("))
try:
enumerated_params = enumerate(sig.params)
except AttributeError:
# Workaround for #136: https://github.com/jonathanslenders/ptpython/issues/136
# AttributeError: 'Lambda' object has no attribute 'get_subscope_by_name'
return []
for i, p in enumerated_params:
# Workaround for #47: 'p' is None when we hit the '*' in the signature.
# and sig has no 'index' attribute.
# See: https://github.com/jonathanslenders/ptpython/issues/47
# https://github.com/davidhalter/jedi/issues/598
description = p.description if p else "*" # or '*'
sig_index = getattr(sig, "index", 0)
if i == sig_index:
# Note: we use `_Param.description` instead of
# `_Param.name`, that way we also get the '*' before args.
append((Signature.CurrentName, str(description)))
else:
append((Signature, str(description)))
append((Signature.Operator, ", "))
if sig.params:
# Pop last comma
result.pop()
append((Signature.Operator, ")"))
append((Signature, " "))
return result
|
def get_tokens(cli):
result = []
append = result.append
Signature = Token.Toolbar.Signature
if python_input.signatures:
sig = python_input.signatures[0] # Always take the first one.
append((Signature, " "))
try:
append((Signature, sig.full_name))
except IndexError:
# Workaround for #37: https://github.com/jonathanslenders/python-prompt-toolkit/issues/37
# See also: https://github.com/davidhalter/jedi/issues/490
return []
append((Signature.Operator, "("))
for i, p in enumerate(sig.params):
# Workaround for #47: 'p' is None when we hit the '*' in the signature.
# and sig has no 'index' attribute.
# See: https://github.com/jonathanslenders/ptpython/issues/47
# https://github.com/davidhalter/jedi/issues/598
description = p.description if p else "*" # or '*'
sig_index = getattr(sig, "index", 0)
if i == sig_index:
# Note: we use `_Param.description` instead of
# `_Param.name`, that way we also get the '*' before args.
append((Signature.CurrentName, str(description)))
else:
append((Signature, str(description)))
append((Signature.Operator, ", "))
if sig.params:
# Pop last comma
result.pop()
append((Signature.Operator, ")"))
append((Signature, " "))
return result
|
https://github.com/prompt-toolkit/ptpython/issues/136
|
from lxml.html import builder as E
E.HTML(
Traceback (most recent call last):
File "/usr/lib/python3.5/site-packages/prompt_toolkit/cache.py", line 32, in get
return self._data[key]
KeyError: 51
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/ptpython", line 9, in <module>
load_entry_point('ptpython==0.35', 'console_scripts', 'ptpython')()
File "/usr/lib/python3.5/site-packages/ptpython/entry_points/run_ptpython.py", line 71, in run
title='Python REPL (ptpython)')
File "/usr/lib/python3.5/site-packages/ptpython/repl.py", line 315, in embed
cli.run()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 389, in run
self.eventloop.run(self.input, self.create_eventloop_callbacks())
File "/usr/lib/python3.5/site-packages/prompt_toolkit/eventloop/posix.py", line 154, in run
t()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 326, in redraw
self._redraw()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/interface.py", line 346, in _redraw
self.renderer.render(self, self.layout, is_done=self.is_done)
File "/usr/lib/python3.5/site-packages/prompt_toolkit/renderer.py", line 429, in render
extended_height=size.rows,
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 157, in write_to_screen
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 348, in write_to_screen
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, s, height))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 157, in write_to_screen
c.write_to_screen(cli, screen, mouse_handlers, WritePosition(xpos, ypos, width, s))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 428, in write_to_screen
width = fl.content.preferred_width(cli, write_position.width).preferred
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 1644, in preferred_width
return self.content.preferred_width(cli, max_available_width)
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/containers.py", line 980, in preferred_width
cli, max_available_width - total_margin_width)
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/controls.py", line 249, in preferred_width
text = token_list_to_text(self._get_tokens_cached(cli))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/controls.py", line 239, in _get_tokens_cached
cli.render_counter, lambda: self.get_tokens(cli))
File "/usr/lib/python3.5/site-packages/prompt_toolkit/cache.py", line 35, in get
value = getter_func()
File "/usr/lib/python3.5/site-packages/prompt_toolkit/layout/controls.py", line 239, in <lambda>
cli.render_counter, lambda: self.get_tokens(cli))
File "/usr/lib/python3.5/site-packages/ptpython/layout.py", line 211, in get_tokens
for i, p in enumerate(sig.params):
File "/usr/lib/python3.5/site-packages/jedi/evaluate/cache.py", line 41, in wrapper
rv = function(obj, *args, **kwargs)
File "/usr/lib/python3.5/site-packages/jedi/api/classes.py", line 348, in params
sub = followed.get_subscope_by_name('__init__')
File "/usr/lib/python3.5/site-packages/jedi/evaluate/representation.py", line 563, in __getattr__
return getattr(self.base_func, name)
AttributeError: 'Lambda' object has no attribute 'get_subscope_by_name'
|
KeyError
|
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
checkpoint["dataset_parameters"] = getattr(
self, "dataset_parameters", None
) # add dataset parameters for making fast predictions
# hyper parameters are passed as arguments directly and not as single dictionary
checkpoint["hparams_name"] = "kwargs"
|
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
checkpoint["dataset_parameters"] = getattr(
self, "dataset_parameters", None
) # add dataset parameters for making fast predictions
checkpoint["loss"] = cloudpickle.dumps(self.loss) # restore loss
checkpoint["output_transformer"] = cloudpickle.dumps(
self.output_transformer
) # restore output transformer
# hyper parameters are passed as arguments directly and not as single dictionary
checkpoint["hparams_name"] = "kwargs"
|
https://github.com/jdb78/pytorch-forecasting/issues/294
|
Traceback (most recent call last):
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1741, in <module>
main()
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1735, in main
globals = debugger.run(setup['file'], None, None, is_module)
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1135, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "airflow_dags/predictions.py", line 167, in <module>
main()
File "airflow_dags/predictions.py", line 163, in main
create_predictions(**context)
File "airflow_dags/predictions.py", line 37, in create_predictions
tft = TemporalFusionTransformer.load_from_checkpoint(model_path, map_location=torch.device('cpu'))
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/pytorch_lightning/core/saving.py", line 158, in load_from_checkpoint
model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/pytorch_lightning/core/saving.py", line 201, in _load_model_state
model.on_load_checkpoint(checkpoint)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py", line 688, in on_load_checkpoint
self.loss = cloudpickle.loads(checkpoint["loss"])
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/storage.py", line 141, in _load_from_bytes
return torch.load(io.BytesIO(b))
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 595, in load
return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 774, in _legacy_load
result = unpickler.load()
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 730, in persistent_load
deserialized_objects[root_key] = restore_location(obj, location)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 175, in default_restore_location
result = fn(storage, location)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 151, in _cuda_deserialize
device = validate_cuda_device(location)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 135, in validate_cuda_device
raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
|
RuntimeError
|
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self.dataset_parameters = checkpoint.get("dataset_parameters", None)
|
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self.dataset_parameters = checkpoint.get("dataset_parameters", None)
self.loss = cloudpickle.loads(checkpoint["loss"])
self.output_transformer = cloudpickle.loads(checkpoint["output_transformer"])
|
https://github.com/jdb78/pytorch-forecasting/issues/294
|
Traceback (most recent call last):
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1741, in <module>
main()
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1735, in main
globals = debugger.run(setup['file'], None, None, is_module)
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/pydevd.py", line 1135, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "/Applications/PyCharm CE.app/Contents/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "airflow_dags/predictions.py", line 167, in <module>
main()
File "airflow_dags/predictions.py", line 163, in main
create_predictions(**context)
File "airflow_dags/predictions.py", line 37, in create_predictions
tft = TemporalFusionTransformer.load_from_checkpoint(model_path, map_location=torch.device('cpu'))
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/pytorch_lightning/core/saving.py", line 158, in load_from_checkpoint
model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/pytorch_lightning/core/saving.py", line 201, in _load_model_state
model.on_load_checkpoint(checkpoint)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py", line 688, in on_load_checkpoint
self.loss = cloudpickle.loads(checkpoint["loss"])
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/storage.py", line 141, in _load_from_bytes
return torch.load(io.BytesIO(b))
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 595, in load
return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 774, in _legacy_load
result = unpickler.load()
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 730, in persistent_load
deserialized_objects[root_key] = restore_location(obj, location)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 175, in default_restore_location
result = fn(storage, location)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 151, in _cuda_deserialize
device = validate_cuda_device(location)
File "/Users/dominique.vandamme/code/data-infrastructure/venv/lib/python3.7/site-packages/torch/serialization.py", line 135, in validate_cuda_device
raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
|
RuntimeError
|
def rescale_parameters(
self, parameters: torch.Tensor, target_scale: torch.Tensor, encoder: BaseEstimator
) -> torch.Tensor:
assert encoder.transformation in ["logit"], (
"Beta distribution is only compatible with logit transformation"
)
assert encoder.center, "Beta distribution requires normalizer to center data"
scaled_mean = encoder(
dict(prediction=parameters[..., 0], target_scale=target_scale)
)
# need to first transform target scale standard deviation in logit space to real space
# we assume a normal distribution in logit space (we used a logit transform and a standard scaler)
# and know that the variance of the beta distribution is limited by `scaled_mean * (1 - scaled_mean)`
scaled_mean = (
scaled_mean * (1 - 2 * self.eps) + self.eps
) # ensure that mean is not exactly 0 or 1
mean_derivative = scaled_mean * (1 - scaled_mean)
# we can approximate variance as
# torch.pow(torch.tanh(target_scale[..., 1].unsqueeze(1) * torch.sqrt(mean_derivative)), 2) * mean_derivative
# shape is (positive) parameter * mean_derivative / var
shape_scaler = (
torch.pow(
torch.tanh(target_scale[..., 1].unsqueeze(1) * torch.sqrt(mean_derivative)),
2,
)
+ self.eps
)
scaled_shape = F.softplus(parameters[..., 1]) / shape_scaler
return torch.stack([scaled_mean, scaled_shape], dim=-1)
|
def rescale_parameters(
self, parameters: torch.Tensor, target_scale: torch.Tensor, encoder: BaseEstimator
) -> torch.Tensor:
assert encoder.transformation in ["logit"], (
"Beta distribution is only compatible with logit transformation"
)
assert encoder.center, "Beta distribution requires normalizer to center data"
scaled_mean = encoder(
dict(prediction=parameters[..., 0], target_scale=target_scale)
)
# need to first transform target scale standard deviation in logit space to real space
# we assume a normal distribution in logit space (we used a logit transform and a standard scaler)
# and know that the variance of the beta distribution is limited by `scaled_mean * (1 - scaled_mean)`
mean_derivative = scaled_mean * (1 - scaled_mean)
# we can approximate variance as
# torch.pow(torch.tanh(target_scale[..., 1].unsqueeze(1) * torch.sqrt(mean_derivative)), 2) * mean_derivative
# shape is (positive) parameter * mean_derivative / var
shape_scaler = torch.pow(
torch.tanh(target_scale[..., 1].unsqueeze(1) * torch.sqrt(mean_derivative)), 2
)
scaled_shape = F.softplus(parameters[..., 1]) / shape_scaler
return torch.stack([scaled_mean, scaled_shape], dim=-1)
|
https://github.com/jdb78/pytorch-forecasting/issues/231
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-12-6644968460fc> in <module>
28 )
29
---> 30 trainer.fit(
31 deepar,
32 train_dataloader=train_dataloader,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
468 self.call_hook('on_fit_start')
469
--> 470 results = self.accelerator_backend.train()
471 self.accelerator_backend.teardown()
472
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py in train(self)
60
61 # train or test
---> 62 results = self.train_or_test()
63 return results
64
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
67 results = self.trainer.run_test()
68 else:
---> 69 results = self.trainer.train()
70 return results
71
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
519 with self.profiler.profile("run_training_epoch"):
520 # run train epoch
--> 521 self.train_loop.run_training_epoch()
522
523 if self.max_steps and self.max_steps <= self.global_step:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch(self)
558 # ------------------------------------
559 with self.trainer.profiler.profile("run_training_batch"):
--> 560 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
561
562 # when returning -1 from train_step, we end epoch early
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
716
717 # optimizer step
--> 718 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
719
720 else:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
491
492 # model hook
--> 493 model_ref.optimizer_step(
494 self.trainer.current_epoch,
495 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1255 # wraps into LightingOptimizer only for running step
1256 optimizer = LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)
-> 1257 optimizer.step(closure=optimizer_closure)
1258
1259 def optimizer_zero_grad(
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, make_optimizer_step, *args, **kwargs)
276
277 if make_optimizer_step:
--> 278 self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
279 else:
280 # make sure to call optimizer_closure when accumulating
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in __optimizer_step(self, closure, profiler_name, *args, **kwargs)
134 else:
135 with trainer.profiler.profile(profiler_name):
--> 136 optimizer.step(closure=closure, *args, **kwargs)
137
138 accelerator_backend = trainer.accelerator_backend
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_forecasting/optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in train_step_and_backward_closure()
706
707 def train_step_and_backward_closure():
--> 708 result = self.training_step_and_backward(
709 split_batch,
710 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
814 # backward pass
815 with self.trainer.profiler.profile("model_backward"):
--> 816 self.backward(result, optimizer, opt_idx)
817
818 # hook - call this hook only
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
834 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
835 else:
--> 836 result.closure_loss = self.trainer.accelerator_backend.backward(
837 result.closure_loss, optimizer, opt_idx, *args, **kwargs
838 )
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
102 # do backward pass
103 model = self.trainer.get_model()
--> 104 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
105
106 # once backward has been applied, release graph
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1149 """
1150 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1151 loss.backward(*args, **kwargs)
1152
1153 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def loss(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:
"""
Calculate negative likelihood
Args:
y_pred: network output
y_actual: actual values
Returns:
torch.Tensor: metric value on which backpropagation can be applied
"""
distribution = self.map_x_to_distribution(y_pred)
# clip y_actual to avoid infinite losses
loss = -distribution.log_prob(y_actual.clip(self.eps, 1 - self.eps))
return loss
|
def loss(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:
"""
Calculate negative likelihood
Args:
y_pred: network output
y_actual: actual values
Returns:
torch.Tensor: metric value on which backpropagation can be applied
"""
distribution = self.map_x_to_distribution(y_pred)
loss = -distribution.log_prob(y_actual)
return loss
|
https://github.com/jdb78/pytorch-forecasting/issues/231
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-12-6644968460fc> in <module>
28 )
29
---> 30 trainer.fit(
31 deepar,
32 train_dataloader=train_dataloader,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
468 self.call_hook('on_fit_start')
469
--> 470 results = self.accelerator_backend.train()
471 self.accelerator_backend.teardown()
472
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py in train(self)
60
61 # train or test
---> 62 results = self.train_or_test()
63 return results
64
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
67 results = self.trainer.run_test()
68 else:
---> 69 results = self.trainer.train()
70 return results
71
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
519 with self.profiler.profile("run_training_epoch"):
520 # run train epoch
--> 521 self.train_loop.run_training_epoch()
522
523 if self.max_steps and self.max_steps <= self.global_step:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch(self)
558 # ------------------------------------
559 with self.trainer.profiler.profile("run_training_batch"):
--> 560 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
561
562 # when returning -1 from train_step, we end epoch early
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
716
717 # optimizer step
--> 718 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
719
720 else:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
491
492 # model hook
--> 493 model_ref.optimizer_step(
494 self.trainer.current_epoch,
495 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1255 # wraps into LightingOptimizer only for running step
1256 optimizer = LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)
-> 1257 optimizer.step(closure=optimizer_closure)
1258
1259 def optimizer_zero_grad(
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, make_optimizer_step, *args, **kwargs)
276
277 if make_optimizer_step:
--> 278 self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
279 else:
280 # make sure to call optimizer_closure when accumulating
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in __optimizer_step(self, closure, profiler_name, *args, **kwargs)
134 else:
135 with trainer.profiler.profile(profiler_name):
--> 136 optimizer.step(closure=closure, *args, **kwargs)
137
138 accelerator_backend = trainer.accelerator_backend
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_forecasting/optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in train_step_and_backward_closure()
706
707 def train_step_and_backward_closure():
--> 708 result = self.training_step_and_backward(
709 split_batch,
710 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
814 # backward pass
815 with self.trainer.profiler.profile("model_backward"):
--> 816 self.backward(result, optimizer, opt_idx)
817
818 # hook - call this hook only
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
834 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
835 else:
--> 836 result.closure_loss = self.trainer.accelerator_backend.backward(
837 result.closure_loss, optimizer, opt_idx, *args, **kwargs
838 )
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
102 # do backward pass
103 model = self.trainer.get_model()
--> 104 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
105
106 # once backward has been applied, release graph
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1149 """
1150 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1151 loss.backward(*args, **kwargs)
1152
1153 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def optimize_hyperparameters(
train_dataloader: DataLoader,
val_dataloader: DataLoader,
model_path: str,
max_epochs: int = 20,
n_trials: int = 100,
timeout: float = 3600 * 8.0, # 8 hours
gradient_clip_val_range: Tuple[float, float] = (0.01, 100.0),
hidden_size_range: Tuple[int, int] = (16, 265),
hidden_continuous_size_range: Tuple[int, int] = (8, 64),
attention_head_size_range: Tuple[int, int] = (1, 4),
dropout_range: Tuple[float, float] = (0.1, 0.3),
learning_rate_range: Tuple[float, float] = (1e-5, 1.0),
use_learning_rate_finder: bool = True,
trainer_kwargs: Dict[str, Any] = {},
log_dir: str = "lightning_logs",
study: optuna.Study = None,
verbose: Union[int, bool] = None,
**kwargs,
) -> optuna.Study:
"""
Optimize Temporal Fusion Transformer hyperparameters.
Run hyperparameter optimization. Learning rate for is determined with
the PyTorch Lightning learning rate finder.
Args:
train_dataloader (DataLoader): dataloader for training model
val_dataloader (DataLoader): dataloader for validating model
model_path (str): folder to which model checkpoints are saved
max_epochs (int, optional): Maximum number of epochs to run training. Defaults to 20.
n_trials (int, optional): Number of hyperparameter trials to run. Defaults to 100.
timeout (float, optional): Time in seconds after which training is stopped regardless of number of epochs
or validation metric. Defaults to 3600*8.0.
hidden_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_size`` hyperparameter. Defaults
to (16, 265).
hidden_continuous_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_continuous_size``
hyperparameter. Defaults to (8, 64).
attention_head_size_range (Tuple[int, int], optional): Minimum and maximum of ``attention_head_size``
hyperparameter. Defaults to (1, 4).
dropout_range (Tuple[float, float], optional): Minimum and maximum of ``dropout`` hyperparameter. Defaults to
(0.1, 0.3).
learning_rate_range (Tuple[float, float], optional): Learning rate range. Defaults to (1e-5, 1.0).
use_learning_rate_finder (bool): If to use learning rate finder or optimize as part of hyperparameters.
Defaults to True.
trainer_kwargs (Dict[str, Any], optional): Additional arguments to the
`PyTorch Lightning trainer <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html>`_ such
as ``limit_train_batches``. Defaults to {}.
log_dir (str, optional): Folder into which to log results for tensorboard. Defaults to "lightning_logs".
study (optuna.Study, optional): study to resume. Will create new study by default.
verbose (Union[int, bool]): level of verbosity.
* None: no change in verbosity level (equivalent to verbose=1 by optuna-set default).
* 0 or False: log only warnings.
* 1 or True: log pruning events.
* 2: optuna logging level at debug level.
Defaults to None.
**kwargs: Additional arguments for the :py:class:`~TemporalFusionTransformer`.
Returns:
optuna.Study: optuna study results
"""
assert isinstance(train_dataloader.dataset, TimeSeriesDataSet) and isinstance(
val_dataloader.dataset, TimeSeriesDataSet
), "dataloaders must be built from timeseriesdataset"
logging_level = {
None: optuna.logging.get_verbosity(),
0: optuna.logging.WARNING,
1: optuna.logging.INFO,
2: optuna.logging.DEBUG,
}
optuna_verbose = logging_level[verbose]
optuna.logging.set_verbosity(optuna_verbose)
loss = kwargs.get(
"loss", QuantileLoss()
) # need a deepcopy of loss as it will otherwise propagate from one trial to the next
# create objective function
def objective(trial: optuna.Trial) -> float:
# Filenames for each trial must be made unique in order to access each checkpoint.
checkpoint_callback = pl.callbacks.ModelCheckpoint(
os.path.join(model_path, "trial_{}".format(trial.number), "{epoch}"),
monitor="val_loss",
)
# The default logger in PyTorch Lightning writes to event files to be consumed by
# TensorBoard. We don't use any logger here as it requires us to implement several abstract
# methods. Instead we setup a simple callback, that saves metrics from each validation step.
metrics_callback = MetricsCallback()
learning_rate_callback = LearningRateMonitor()
logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
gradient_clip_val = trial.suggest_loguniform(
"gradient_clip_val", *gradient_clip_val_range
)
trainer_kwargs.setdefault("gpus", [0] if torch.cuda.is_available() else None)
trainer = pl.Trainer(
checkpoint_callback=checkpoint_callback,
max_epochs=max_epochs,
gradient_clip_val=gradient_clip_val,
callbacks=[
metrics_callback,
learning_rate_callback,
PyTorchLightningPruningCallback(trial, monitor="val_loss"),
],
logger=logger,
progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],
weights_summary=[None, "top"][optuna_verbose < optuna.logging.INFO],
**trainer_kwargs,
)
# create model
hidden_size = trial.suggest_int("hidden_size", *hidden_size_range, log=True)
kwargs["loss"] = copy.deepcopy(loss)
model = TemporalFusionTransformer.from_dataset(
train_dataloader.dataset,
dropout=trial.suggest_uniform("dropout", *dropout_range),
hidden_size=hidden_size,
hidden_continuous_size=trial.suggest_int(
"hidden_continuous_size",
hidden_continuous_size_range[0],
min(hidden_continuous_size_range[1], hidden_size),
log=True,
),
attention_head_size=trial.suggest_int(
"attention_head_size", *attention_head_size_range
),
log_interval=-1,
**kwargs,
)
# find good learning rate
if use_learning_rate_finder:
lr_trainer = pl.Trainer(
gradient_clip_val=gradient_clip_val,
gpus=[0] if torch.cuda.is_available() else None,
logger=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
res = lr_trainer.tuner.lr_find(
model,
train_dataloader=train_dataloader,
val_dataloaders=val_dataloader,
early_stop_threshold=10000,
min_lr=learning_rate_range[0],
num_training=100,
max_lr=learning_rate_range[1],
)
loss_finite = np.isfinite(res.results["loss"])
if (
loss_finite.sum() > 3
): # at least 3 valid values required for learning rate finder
lr_smoothed, loss_smoothed = sm.nonparametric.lowess(
np.asarray(res.results["loss"])[loss_finite],
np.asarray(res.results["lr"])[loss_finite],
frac=1.0 / 10.0,
)[min(loss_finite.sum() - 3, 10) : -1].T
optimal_idx = np.gradient(loss_smoothed).argmin()
optimal_lr = lr_smoothed[optimal_idx]
else:
optimal_idx = np.asarray(res.results["loss"]).argmin()
optimal_lr = res.results["lr"][optimal_idx]
optuna_logger.info(f"Using learning rate of {optimal_lr:.3g}")
# add learning rate artificially
model.hparams.learning_rate = trial.suggest_uniform(
"learning_rate", optimal_lr, optimal_lr
)
else:
model.hparams.learning_rate = trial.suggest_loguniform(
"learning_rate", *learning_rate_range
)
# fit
trainer.fit(
model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader
)
# report result
return metrics_callback.metrics[-1]["val_loss"].item()
# setup optuna and run
pruner = optuna.pruners.SuccessiveHalvingPruner()
if study is None:
study = optuna.create_study(direction="minimize", pruner=pruner)
study.optimize(objective, n_trials=n_trials, timeout=timeout)
return study
|
def optimize_hyperparameters(
train_dataloader: DataLoader,
val_dataloader: DataLoader,
model_path: str,
max_epochs: int = 20,
n_trials: int = 100,
timeout: float = 3600 * 8.0, # 8 hours
gradient_clip_val_range: Tuple[float, float] = (0.01, 100.0),
hidden_size_range: Tuple[int, int] = (16, 265),
hidden_continuous_size_range: Tuple[int, int] = (8, 64),
attention_head_size_range: Tuple[int, int] = (1, 4),
dropout_range: Tuple[float, float] = (0.1, 0.3),
learning_rate_range: Tuple[float, float] = (1e-5, 1.0),
use_learning_rate_finder: bool = True,
trainer_kwargs: Dict[str, Any] = {},
log_dir: str = "lightning_logs",
study: optuna.Study = None,
verbose: Union[int, bool] = None,
**kwargs,
) -> optuna.Study:
"""
Optimize Temporal Fusion Transformer hyperparameters.
Run hyperparameter optimization. Learning rate for is determined with
the PyTorch Lightning learning rate finder.
Args:
train_dataloader (DataLoader): dataloader for training model
val_dataloader (DataLoader): dataloader for validating model
model_path (str): folder to which model checkpoints are saved
max_epochs (int, optional): Maximum number of epochs to run training. Defaults to 20.
n_trials (int, optional): Number of hyperparameter trials to run. Defaults to 100.
timeout (float, optional): Time in seconds after which training is stopped regardless of number of epochs
or validation metric. Defaults to 3600*8.0.
hidden_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_size`` hyperparameter. Defaults
to (16, 265).
hidden_continuous_size_range (Tuple[int, int], optional): Minimum and maximum of ``hidden_continuous_size``
hyperparameter. Defaults to (8, 64).
attention_head_size_range (Tuple[int, int], optional): Minimum and maximum of ``attention_head_size``
hyperparameter. Defaults to (1, 4).
dropout_range (Tuple[float, float], optional): Minimum and maximum of ``dropout`` hyperparameter. Defaults to
(0.1, 0.3).
learning_rate_range (Tuple[float, float], optional): Learning rate range. Defaults to (1e-5, 1.0).
use_learning_rate_finder (bool): If to use learning rate finder or optimize as part of hyperparameters.
Defaults to True.
trainer_kwargs (Dict[str, Any], optional): Additional arguments to the
`PyTorch Lightning trainer <https://pytorch-lightning.readthedocs.io/en/latest/trainer.html>`_ such
as ``limit_train_batches``. Defaults to {}.
log_dir (str, optional): Folder into which to log results for tensorboard. Defaults to "lightning_logs".
study (optuna.Study, optional): study to resume. Will create new study by default.
verbose (Union[int, bool]): level of verbosity.
* None: no change in verbosity level (equivalent to verbose=1 by optuna-set default).
* 0 or False: log only warnings.
* 1 or True: log pruning events.
* 2: optuna logging level at debug level.
Defaults to None.
**kwargs: Additional arguments for the :py:class:`~TemporalFusionTransformer`.
Returns:
optuna.Study: optuna study results
"""
assert isinstance(train_dataloader.dataset, TimeSeriesDataSet) and isinstance(
val_dataloader.dataset, TimeSeriesDataSet
), "dataloaders must be built from timeseriesdataset"
logging_level = {
None: optuna.logging.get_verbosity(),
0: optuna.logging.WARNING,
1: optuna.logging.INFO,
2: optuna.logging.DEBUG,
}
optuna_verbose = logging_level[verbose]
optuna.logging.set_verbosity(optuna_verbose)
loss = kwargs.get(
"loss", QuantileLoss()
) # need a deepcopy of loss as it will otherwise propagate from one trial to the next
# create objective function
def objective(trial: optuna.Trial) -> float:
# Filenames for each trial must be made unique in order to access each checkpoint.
checkpoint_callback = pl.callbacks.ModelCheckpoint(
os.path.join(model_path, "trial_{}".format(trial.number), "{epoch}"),
monitor="val_loss",
)
# The default logger in PyTorch Lightning writes to event files to be consumed by
# TensorBoard. We don't use any logger here as it requires us to implement several abstract
# methods. Instead we setup a simple callback, that saves metrics from each validation step.
metrics_callback = MetricsCallback()
learning_rate_callback = LearningRateMonitor()
logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
gradient_clip_val = trial.suggest_loguniform(
"gradient_clip_val", *gradient_clip_val_range
)
trainer_kwargs.setdefault("gpus", [0] if torch.cuda.is_available() else None)
trainer = pl.Trainer(
checkpoint_callback=checkpoint_callback,
max_epochs=max_epochs,
gradient_clip_val=gradient_clip_val,
callbacks=[
metrics_callback,
learning_rate_callback,
PyTorchLightningPruningCallback(trial, monitor="val_loss"),
],
logger=logger,
progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],
weights_summary=[None, "top"][optuna_verbose < optuna.logging.INFO],
**trainer_kwargs,
)
# create model
hidden_size = trial.suggest_int("hidden_size", *hidden_size_range, log=True)
kwargs["loss"] = copy.deepcopy(loss)
model = TemporalFusionTransformer.from_dataset(
train_dataloader.dataset,
dropout=trial.suggest_uniform("dropout", *dropout_range),
hidden_size=hidden_size,
hidden_continuous_size=trial.suggest_int(
"hidden_continuous_size",
hidden_continuous_size_range[0],
min(hidden_continuous_size_range[1], hidden_size),
log=True,
),
attention_head_size=trial.suggest_int(
"attention_head_size", *attention_head_size_range
),
log_interval=-1,
**kwargs,
)
# find good learning rate
if use_learning_rate_finder:
lr_trainer = pl.Trainer(
gradient_clip_val=gradient_clip_val,
gpus=[0] if torch.cuda.is_available() else None,
logger=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
res = lr_trainer.tuner.lr_find(
model,
train_dataloader=train_dataloader,
val_dataloaders=val_dataloader,
early_stop_threshold=10000,
min_lr=learning_rate_range[0],
num_training=100,
max_lr=learning_rate_range[1],
)
loss_finite = np.isfinite(res.results["loss"])
lr_smoothed, loss_smoothed = sm.nonparametric.lowess(
np.asarray(res.results["loss"])[loss_finite],
np.asarray(res.results["lr"])[loss_finite],
frac=1.0 / 10.0,
)[10:-1].T
optimal_idx = np.gradient(loss_smoothed).argmin()
optimal_lr = lr_smoothed[optimal_idx]
optuna_logger.info(f"Using learning rate of {optimal_lr:.3g}")
# add learning rate artificially
model.hparams.learning_rate = trial.suggest_uniform(
"learning_rate", optimal_lr, optimal_lr
)
else:
model.hparams.learning_rate = trial.suggest_loguniform(
"learning_rate", *learning_rate_range
)
# fit
trainer.fit(
model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader
)
# report result
return metrics_callback.metrics[-1]["val_loss"].item()
# setup optuna and run
pruner = optuna.pruners.SuccessiveHalvingPruner()
if study is None:
study = optuna.create_study(direction="minimize", pruner=pruner)
study.optimize(objective, n_trials=n_trials, timeout=timeout)
return study
|
https://github.com/jdb78/pytorch-forecasting/issues/231
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-12-6644968460fc> in <module>
28 )
29
---> 30 trainer.fit(
31 deepar,
32 train_dataloader=train_dataloader,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
468 self.call_hook('on_fit_start')
469
--> 470 results = self.accelerator_backend.train()
471 self.accelerator_backend.teardown()
472
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py in train(self)
60
61 # train or test
---> 62 results = self.train_or_test()
63 return results
64
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
67 results = self.trainer.run_test()
68 else:
---> 69 results = self.trainer.train()
70 return results
71
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
519 with self.profiler.profile("run_training_epoch"):
520 # run train epoch
--> 521 self.train_loop.run_training_epoch()
522
523 if self.max_steps and self.max_steps <= self.global_step:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch(self)
558 # ------------------------------------
559 with self.trainer.profiler.profile("run_training_batch"):
--> 560 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
561
562 # when returning -1 from train_step, we end epoch early
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
716
717 # optimizer step
--> 718 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
719
720 else:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
491
492 # model hook
--> 493 model_ref.optimizer_step(
494 self.trainer.current_epoch,
495 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1255 # wraps into LightingOptimizer only for running step
1256 optimizer = LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)
-> 1257 optimizer.step(closure=optimizer_closure)
1258
1259 def optimizer_zero_grad(
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, make_optimizer_step, *args, **kwargs)
276
277 if make_optimizer_step:
--> 278 self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
279 else:
280 # make sure to call optimizer_closure when accumulating
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in __optimizer_step(self, closure, profiler_name, *args, **kwargs)
134 else:
135 with trainer.profiler.profile(profiler_name):
--> 136 optimizer.step(closure=closure, *args, **kwargs)
137
138 accelerator_backend = trainer.accelerator_backend
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_forecasting/optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in train_step_and_backward_closure()
706
707 def train_step_and_backward_closure():
--> 708 result = self.training_step_and_backward(
709 split_batch,
710 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
814 # backward pass
815 with self.trainer.profiler.profile("model_backward"):
--> 816 self.backward(result, optimizer, opt_idx)
817
818 # hook - call this hook only
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
834 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
835 else:
--> 836 result.closure_loss = self.trainer.accelerator_backend.backward(
837 result.closure_loss, optimizer, opt_idx, *args, **kwargs
838 )
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
102 # do backward pass
103 model = self.trainer.get_model()
--> 104 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
105
106 # once backward has been applied, release graph
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1149 """
1150 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1151 loss.backward(*args, **kwargs)
1152
1153 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def objective(trial: optuna.Trial) -> float:
# Filenames for each trial must be made unique in order to access each checkpoint.
checkpoint_callback = pl.callbacks.ModelCheckpoint(
os.path.join(model_path, "trial_{}".format(trial.number), "{epoch}"),
monitor="val_loss",
)
# The default logger in PyTorch Lightning writes to event files to be consumed by
# TensorBoard. We don't use any logger here as it requires us to implement several abstract
# methods. Instead we setup a simple callback, that saves metrics from each validation step.
metrics_callback = MetricsCallback()
learning_rate_callback = LearningRateMonitor()
logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
gradient_clip_val = trial.suggest_loguniform(
"gradient_clip_val", *gradient_clip_val_range
)
trainer_kwargs.setdefault("gpus", [0] if torch.cuda.is_available() else None)
trainer = pl.Trainer(
checkpoint_callback=checkpoint_callback,
max_epochs=max_epochs,
gradient_clip_val=gradient_clip_val,
callbacks=[
metrics_callback,
learning_rate_callback,
PyTorchLightningPruningCallback(trial, monitor="val_loss"),
],
logger=logger,
progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],
weights_summary=[None, "top"][optuna_verbose < optuna.logging.INFO],
**trainer_kwargs,
)
# create model
hidden_size = trial.suggest_int("hidden_size", *hidden_size_range, log=True)
kwargs["loss"] = copy.deepcopy(loss)
model = TemporalFusionTransformer.from_dataset(
train_dataloader.dataset,
dropout=trial.suggest_uniform("dropout", *dropout_range),
hidden_size=hidden_size,
hidden_continuous_size=trial.suggest_int(
"hidden_continuous_size",
hidden_continuous_size_range[0],
min(hidden_continuous_size_range[1], hidden_size),
log=True,
),
attention_head_size=trial.suggest_int(
"attention_head_size", *attention_head_size_range
),
log_interval=-1,
**kwargs,
)
# find good learning rate
if use_learning_rate_finder:
lr_trainer = pl.Trainer(
gradient_clip_val=gradient_clip_val,
gpus=[0] if torch.cuda.is_available() else None,
logger=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
res = lr_trainer.tuner.lr_find(
model,
train_dataloader=train_dataloader,
val_dataloaders=val_dataloader,
early_stop_threshold=10000,
min_lr=learning_rate_range[0],
num_training=100,
max_lr=learning_rate_range[1],
)
loss_finite = np.isfinite(res.results["loss"])
if (
loss_finite.sum() > 3
): # at least 3 valid values required for learning rate finder
lr_smoothed, loss_smoothed = sm.nonparametric.lowess(
np.asarray(res.results["loss"])[loss_finite],
np.asarray(res.results["lr"])[loss_finite],
frac=1.0 / 10.0,
)[min(loss_finite.sum() - 3, 10) : -1].T
optimal_idx = np.gradient(loss_smoothed).argmin()
optimal_lr = lr_smoothed[optimal_idx]
else:
optimal_idx = np.asarray(res.results["loss"]).argmin()
optimal_lr = res.results["lr"][optimal_idx]
optuna_logger.info(f"Using learning rate of {optimal_lr:.3g}")
# add learning rate artificially
model.hparams.learning_rate = trial.suggest_uniform(
"learning_rate", optimal_lr, optimal_lr
)
else:
model.hparams.learning_rate = trial.suggest_loguniform(
"learning_rate", *learning_rate_range
)
# fit
trainer.fit(
model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader
)
# report result
return metrics_callback.metrics[-1]["val_loss"].item()
|
def objective(trial: optuna.Trial) -> float:
# Filenames for each trial must be made unique in order to access each checkpoint.
checkpoint_callback = pl.callbacks.ModelCheckpoint(
os.path.join(model_path, "trial_{}".format(trial.number), "{epoch}"),
monitor="val_loss",
)
# The default logger in PyTorch Lightning writes to event files to be consumed by
# TensorBoard. We don't use any logger here as it requires us to implement several abstract
# methods. Instead we setup a simple callback, that saves metrics from each validation step.
metrics_callback = MetricsCallback()
learning_rate_callback = LearningRateMonitor()
logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
gradient_clip_val = trial.suggest_loguniform(
"gradient_clip_val", *gradient_clip_val_range
)
trainer_kwargs.setdefault("gpus", [0] if torch.cuda.is_available() else None)
trainer = pl.Trainer(
checkpoint_callback=checkpoint_callback,
max_epochs=max_epochs,
gradient_clip_val=gradient_clip_val,
callbacks=[
metrics_callback,
learning_rate_callback,
PyTorchLightningPruningCallback(trial, monitor="val_loss"),
],
logger=logger,
progress_bar_refresh_rate=[0, 1][optuna_verbose < optuna.logging.INFO],
weights_summary=[None, "top"][optuna_verbose < optuna.logging.INFO],
**trainer_kwargs,
)
# create model
hidden_size = trial.suggest_int("hidden_size", *hidden_size_range, log=True)
kwargs["loss"] = copy.deepcopy(loss)
model = TemporalFusionTransformer.from_dataset(
train_dataloader.dataset,
dropout=trial.suggest_uniform("dropout", *dropout_range),
hidden_size=hidden_size,
hidden_continuous_size=trial.suggest_int(
"hidden_continuous_size",
hidden_continuous_size_range[0],
min(hidden_continuous_size_range[1], hidden_size),
log=True,
),
attention_head_size=trial.suggest_int(
"attention_head_size", *attention_head_size_range
),
log_interval=-1,
**kwargs,
)
# find good learning rate
if use_learning_rate_finder:
lr_trainer = pl.Trainer(
gradient_clip_val=gradient_clip_val,
gpus=[0] if torch.cuda.is_available() else None,
logger=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
res = lr_trainer.tuner.lr_find(
model,
train_dataloader=train_dataloader,
val_dataloaders=val_dataloader,
early_stop_threshold=10000,
min_lr=learning_rate_range[0],
num_training=100,
max_lr=learning_rate_range[1],
)
loss_finite = np.isfinite(res.results["loss"])
lr_smoothed, loss_smoothed = sm.nonparametric.lowess(
np.asarray(res.results["loss"])[loss_finite],
np.asarray(res.results["lr"])[loss_finite],
frac=1.0 / 10.0,
)[10:-1].T
optimal_idx = np.gradient(loss_smoothed).argmin()
optimal_lr = lr_smoothed[optimal_idx]
optuna_logger.info(f"Using learning rate of {optimal_lr:.3g}")
# add learning rate artificially
model.hparams.learning_rate = trial.suggest_uniform(
"learning_rate", optimal_lr, optimal_lr
)
else:
model.hparams.learning_rate = trial.suggest_loguniform(
"learning_rate", *learning_rate_range
)
# fit
trainer.fit(
model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader
)
# report result
return metrics_callback.metrics[-1]["val_loss"].item()
|
https://github.com/jdb78/pytorch-forecasting/issues/231
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-12-6644968460fc> in <module>
28 )
29
---> 30 trainer.fit(
31 deepar,
32 train_dataloader=train_dataloader,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
468 self.call_hook('on_fit_start')
469
--> 470 results = self.accelerator_backend.train()
471 self.accelerator_backend.teardown()
472
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cpu_accelerator.py in train(self)
60
61 # train or test
---> 62 results = self.train_or_test()
63 return results
64
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
67 results = self.trainer.run_test()
68 else:
---> 69 results = self.trainer.train()
70 return results
71
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
519 with self.profiler.profile("run_training_epoch"):
520 # run train epoch
--> 521 self.train_loop.run_training_epoch()
522
523 if self.max_steps and self.max_steps <= self.global_step:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch(self)
558 # ------------------------------------
559 with self.trainer.profiler.profile("run_training_batch"):
--> 560 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
561
562 # when returning -1 from train_step, we end epoch early
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
716
717 # optimizer step
--> 718 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
719
720 else:
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
491
492 # model hook
--> 493 model_ref.optimizer_step(
494 self.trainer.current_epoch,
495 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1255 # wraps into LightingOptimizer only for running step
1256 optimizer = LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)
-> 1257 optimizer.step(closure=optimizer_closure)
1258
1259 def optimizer_zero_grad(
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in step(self, closure, make_optimizer_step, *args, **kwargs)
276
277 if make_optimizer_step:
--> 278 self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
279 else:
280 # make sure to call optimizer_closure when accumulating
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/optimizer.py in __optimizer_step(self, closure, profiler_name, *args, **kwargs)
134 else:
135 with trainer.profiler.profile(profiler_name):
--> 136 optimizer.step(closure=closure, *args, **kwargs)
137
138 accelerator_backend = trainer.accelerator_backend
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_forecasting/optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in train_step_and_backward_closure()
706
707 def train_step_and_backward_closure():
--> 708 result = self.training_step_and_backward(
709 split_batch,
710 batch_idx,
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
814 # backward pass
815 with self.trainer.profiler.profile("model_backward"):
--> 816 self.backward(result, optimizer, opt_idx)
817
818 # hook - call this hook only
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
834 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
835 else:
--> 836 result.closure_loss = self.trainer.accelerator_backend.backward(
837 result.closure_loss, optimizer, opt_idx, *args, **kwargs
838 )
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
102 # do backward pass
103 model = self.trainer.get_model()
--> 104 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
105
106 # once backward has been applied, release graph
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1149 """
1150 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1151 loss.backward(*args, **kwargs)
1152
1153 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
~/miniconda3/envs/smartsilex-eda/lib/python3.8/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def __init__(
self,
data: pd.DataFrame,
time_idx: str,
target: Union[str, List[str]],
group_ids: List[str],
weight: Union[str, None] = None,
max_encoder_length: int = 30,
min_encoder_length: int = None,
min_prediction_idx: int = None,
min_prediction_length: int = None,
max_prediction_length: int = 1,
static_categoricals: List[str] = [],
static_reals: List[str] = [],
time_varying_known_categoricals: List[str] = [],
time_varying_known_reals: List[str] = [],
time_varying_unknown_categoricals: List[str] = [],
time_varying_unknown_reals: List[str] = [],
variable_groups: Dict[str, List[int]] = {},
dropout_categoricals: List[str] = [],
constant_fill_strategy={},
allow_missings: bool = False,
add_relative_time_idx: bool = False,
add_target_scales: bool = False,
add_encoder_length: Union[bool, str] = "auto",
target_normalizer: Union[
TorchNormalizer, NaNLabelEncoder, EncoderNormalizer, str
] = "auto",
categorical_encoders={},
scalers={},
randomize_length: Union[None, Tuple[float, float], bool] = False,
predict_mode: bool = False,
):
"""
Timeseries dataset holding data for models.
The :ref:`tutorial on passing data to models <passing-data>` is helpful to understand the output of the dataset
and how it is coupled to models.
Each sample is a subsequence of a full time series. The subsequence consists of encoder and decoder/prediction
timepoints for a given time series. This class constructs an index which defined which subsequences exists and
can be samples from (``index`` attribute). The samples in the index are defined by by the various parameters.
to the class (encoder and prediction lengths, minimum prediction length, randomize length and predict keywords).
How samples are
sampled into batches for training, is determined by the DataLoader. The class provides the
:py:meth:`~TimeSeriesDataSet.to_dataloader` method to convert the dataset into a dataloader.
Large datasets:
Currently the class is limited to in-memory operations (that can be sped up by an
existing installation of `numba <https://pypi.org/project/numba/>`_). If you have extremely large data,
however, you can pass prefitted encoders and and scalers to it and a subset of sequences to the class to
construct a valid dataset (plus, likely the EncoderNormalizer should be used to normalize targets).
when fitting a network, you would then to create a custom DataLoader that rotates through the datasets.
There is currently no in-built methods to do this.
Args:
data: dataframe with sequence data - each row can be identified with ``time_idx`` and the ``group_ids``
time_idx: integer column denoting the time index. This columns is used to determine the sequence of samples.
If there no missings observations, the time index should increase by ``+1`` for each subsequent sample.
The first time_idx for each series does not necessarily have to be ``0`` but any value is allowed.
target: column denoting the target or list of columns denoting the target - categorical or continous.
group_ids: list of column names identifying a time series. This means that the ``group_ids`` identify
a sample together with the ``time_idx``. If you have only one timeseries, set this to the
name of column that is constant.
weight: column name for weights. Defaults to None.
max_encoder_length: maximum length to encode
min_encoder_length: minimum allowed length to encode. Defaults to max_encoder_length.
min_prediction_idx: minimum ``time_idx`` from where to start predictions. This parameter can be useful to
create a validation or test set.
max_prediction_length: maximum prediction/decoder length (choose this not too short as it can help
convergence)
min_prediction_length: minimum prediction/decoder length. Defaults to max_prediction_length
static_categoricals: list of categorical variables that do not change over time,
entries can be also lists which are then encoded together
(e.g. useful for product categories)
static_reals: list of continuous variables that do not change over time
time_varying_known_categoricals: list of categorical variables that change over
time and are know in the future, entries can be also lists which are then encoded together
(e.g. useful for special days or promotion categories)
time_varying_known_reals: list of continuous variables that change over
time and are know in the future
time_varying_unknown_categoricals: list of categorical variables that change over
time and are not know in the future, entries can be also lists which are then encoded together
(e.g. useful for weather categories)
time_varying_unknown_reals: list of continuous variables that change over
time and are not know in the future
variable_groups: dictionary mapping a name to a list of columns in the data. The name should be present
in a categorical or real class argument, to be able to encode or scale the columns by group.
dropout_categoricals: list of categorical variables that are unknown when making a forecast without
observed history
constant_fill_strategy: dictionary of column names with constants to fill in missing values if there are
gaps in the sequence (by default forward fill strategy is used). The values will be only used if
``allow_missings=True``. A common use case is to denote that demand was 0 if the sample is not in
the dataset.
allow_missings: if to allow missing timesteps that are automatically filled up. Missing values
refer to gaps in the ``time_idx``, e.g. if a specific timeseries has only samples for
1, 2, 4, 5, the sample for 3 will be generated on-the-fly.
Allow missings does not deal with ``NA`` values. You should fill NA values before
passing the dataframe to the TimeSeriesDataSet.
add_relative_time_idx: if to add a relative time index as feature (i.e. for each sampled sequence, the index
will range from -encoder_length to prediction_length)
add_target_scales: if to add scales for target to static real features (i.e. add the center and scale
of the unnormalized timeseries as features)
add_encoder_length: if to add decoder length to list of static real variables. Defaults to "auto",
i.e. yes if ``min_encoder_length != max_encoder_length``.
target_normalizer: transformer that takes group_ids, target and time_idx to return normalized targets.
You can choose from :py:class:`~TorchNormalizer`, :py:class:`~NaNLabelEncoder`,
:py:class:`~EncoderNormalizer` or `None` for using not normalizer.
By default an appropriate normalizer is chosen automatically.
categorical_encoders: dictionary of scikit learn label transformers. If you have unobserved categories in
the future, you can use the :py:class:`~pytorch_forecasting.encoders.NaNLabelEncoder` with
``add_nan=True``. Defaults effectively to sklearn's ``LabelEncoder()``. Prefittet encoders will not
be fit again.
scalers: dictionary of scikit learn scalers. Defaults to sklearn's ``StandardScaler()``.
Other options are :py:class:`~pytorch_forecasting.data.encoders.EncoderNormalizer`,
:py:class:`~pytorch_forecasting.data.encoders.GroupNormalizer` or scikit-learn's ``StandarScaler()``,
``RobustScaler()`` or `None` for using no normalizer / normalizer with `center=0` and `scale=1`
(`method="identity"`).
Prefittet encoders will not be fit again (with the exception of the
:py:class:`~pytorch_forecasting.data.encoders.EncoderNormalizer` that is fit on every encoder sequence).
randomize_length: None or False if not to randomize lengths. Tuple of beta distribution concentrations
from which
probabilities are sampled that are used to sample new sequence lengths with a binomial
distribution.
If True, defaults to (0.2, 0.05), i.e. ~1/4 of samples around minimum encoder length.
Defaults to False otherwise.
predict_mode: if to only iterate over each timeseries once (only the last provided samples).
Effectively, this will take choose for each time series identified by ``group_ids``
the last ``max_prediction_length`` samples of each time series as
prediction samples and everthing previous up to ``max_encoder_length`` samples as encoder samples.
"""
super().__init__()
self.max_encoder_length = max_encoder_length
assert isinstance(self.max_encoder_length, int), (
"max encoder length must be integer"
)
if min_encoder_length is None:
min_encoder_length = max_encoder_length
self.min_encoder_length = min_encoder_length
assert self.min_encoder_length <= self.max_encoder_length, (
"max encoder length has to be larger equals min encoder length"
)
assert isinstance(self.min_encoder_length, int), (
"min encoder length must be integer"
)
self.max_prediction_length = max_prediction_length
assert isinstance(self.max_prediction_length, int), (
"max prediction length must be integer"
)
if min_prediction_length is None:
min_prediction_length = max_prediction_length
self.min_prediction_length = min_prediction_length
assert self.min_prediction_length <= self.max_prediction_length, (
"max prediction length has to be larger equals min prediction length"
)
assert self.min_prediction_length > 0, "min prediction length must be larger than 0"
assert isinstance(self.min_prediction_length, int), (
"min prediction length must be integer"
)
self.target = target
self.weight = weight
self.time_idx = time_idx
self.group_ids = [] + group_ids
self.static_categoricals = [] + static_categoricals
self.static_reals = [] + static_reals
self.time_varying_known_categoricals = [] + time_varying_known_categoricals
self.time_varying_known_reals = [] + time_varying_known_reals
self.time_varying_unknown_categoricals = [] + time_varying_unknown_categoricals
self.time_varying_unknown_reals = [] + time_varying_unknown_reals
self.dropout_categoricals = [] + dropout_categoricals
self.add_relative_time_idx = add_relative_time_idx
# set automatic defaults
if isinstance(randomize_length, bool):
if not randomize_length:
randomize_length = None
else:
randomize_length = (0.2, 0.05)
self.randomize_length = randomize_length
if min_prediction_idx is None:
min_prediction_idx = data[self.time_idx].min()
self.min_prediction_idx = min_prediction_idx
self.constant_fill_strategy = (
{} if len(constant_fill_strategy) == 0 else constant_fill_strategy
)
self.predict_mode = predict_mode
self.allow_missings = allow_missings
self.target_normalizer = target_normalizer
self.categorical_encoders = (
{} if len(categorical_encoders) == 0 else categorical_encoders
)
self.scalers = {} if len(scalers) == 0 else scalers
self.add_target_scales = add_target_scales
self.variable_groups = {} if len(variable_groups) == 0 else variable_groups
# add_encoder_length
if isinstance(add_encoder_length, str):
assert add_encoder_length == "auto", (
f"Only 'auto' allowed for add_encoder_length but found {add_encoder_length}"
)
add_encoder_length = self.min_encoder_length != self.max_encoder_length
assert isinstance(add_encoder_length, bool), (
f"add_encoder_length should be boolean or 'auto' but found {add_encoder_length}"
)
self.add_encoder_length = add_encoder_length
# target normalizer
self._set_target_normalizer(data)
# overwrite values
self.reset_overwrite_values()
for target in self.target_names:
assert target not in self.time_varying_known_reals, (
f"target {target} should be an unknown continuous variable in the future"
)
# set data
assert data.index.is_unique, "data index has to be unique"
if min_prediction_idx is not None:
data = data[
lambda x: data[self.time_idx]
>= self.min_prediction_idx - self.max_encoder_length
]
data = data.sort_values(self.group_ids + [self.time_idx])
# add time index relative to prediction position
if self.add_relative_time_idx:
assert "relative_time_idx" not in data.columns, (
"relative_time_idx is a protected column and must not be present in data"
)
if (
"relative_time_idx" not in self.time_varying_known_reals
and "relative_time_idx" not in self.reals
):
self.time_varying_known_reals.append("relative_time_idx")
data["relative_time_idx"] = (
0.0 # dummy - real value will be set dynamiclly in __getitem__()
)
# add decoder length to static real variables
if self.add_encoder_length:
assert "encoder_length" not in data.columns, (
"encoder_length is a protected column and must not be present in data"
)
if (
"encoder_length" not in self.time_varying_known_reals
and "encoder_length" not in self.reals
):
self.static_reals.append("encoder_length")
data["encoder_length"] = (
0 # dummy - real value will be set dynamiclly in __getitem__()
)
# validate
self._validate_data(data)
# preprocess data
data = self._preprocess_data(data)
for target in self.target_names:
assert target not in self.scalers, (
"Target normalizer is separate and not in scalers."
)
# create index
self.index = self._construct_index(data, predict_mode=predict_mode)
# convert to torch tensor for high performance data loading later
self.data = self._data_to_tensors(data)
|
def __init__(
self,
data: pd.DataFrame,
time_idx: str,
target: Union[str, List[str]],
group_ids: List[str],
weight: Union[str, None] = None,
max_encoder_length: int = 30,
min_encoder_length: int = None,
min_prediction_idx: int = None,
min_prediction_length: int = None,
max_prediction_length: int = 1,
static_categoricals: List[str] = [],
static_reals: List[str] = [],
time_varying_known_categoricals: List[str] = [],
time_varying_known_reals: List[str] = [],
time_varying_unknown_categoricals: List[str] = [],
time_varying_unknown_reals: List[str] = [],
variable_groups: Dict[str, List[int]] = {},
dropout_categoricals: List[str] = [],
constant_fill_strategy={},
allow_missings: bool = False,
add_relative_time_idx: bool = False,
add_target_scales: bool = False,
add_encoder_length: Union[bool, str] = "auto",
target_normalizer: Union[
TorchNormalizer, NaNLabelEncoder, EncoderNormalizer, str
] = "auto",
categorical_encoders={},
scalers={},
randomize_length: Union[None, Tuple[float, float], bool] = False,
predict_mode: bool = False,
):
"""
Timeseries dataset holding data for models.
The :ref:`tutorial on passing data to models <passing-data>` is helpful to understand the output of the dataset
and how it is coupled to models.
Each sample is a subsequence of a full time series. The subsequence consists of encoder and decoder/prediction
timepoints for a given time series. This class constructs an index which defined which subsequences exists and
can be samples from (``index`` attribute). The samples in the index are defined by by the various parameters.
to the class (encoder and prediction lengths, minimum prediction length, randomize length and predict keywords).
How samples are
sampled into batches for training, is determined by the DataLoader. The class provides the
:py:meth:`~TimeSeriesDataSet.to_dataloader` method to convert the dataset into a dataloader.
Large datasets:
Currently the class is limited to in-memory operations. If you have extremely large data,
however, you can pass prefitted encoders and and scalers to it and a subset of sequences to the class to
construct a valid dataset (plus, likely the EncoderNormalizer should be used to normalize targets).
when fitting a network, you would then to create a custom DataLoader that rotates through the datasets.
There is currently no in-built methods to do this.
Args:
data: dataframe with sequence data - each row can be identified with ``time_idx`` and the ``group_ids``
time_idx: integer column denoting the time index. This columns is used to determine the sequence of samples.
If there no missings observations, the time index should increase by ``+1`` for each subsequent sample.
The first time_idx for each series does not necessarily have to be ``0`` but any value is allowed.
target: column denoting the target or list of columns denoting the target - categorical or continous.
group_ids: list of column names identifying a time series. This means that the ``group_ids`` identify
a sample together with the ``time_idx``. If you have only one timeseries, set this to the
name of column that is constant.
weight: column name for weights. Defaults to None.
max_encoder_length: maximum length to encode
min_encoder_length: minimum allowed length to encode. Defaults to max_encoder_length.
min_prediction_idx: minimum ``time_idx`` from where to start predictions. This parameter can be useful to
create a validation or test set.
max_prediction_length: maximum prediction/decoder length (choose this not too short as it can help
convergence)
min_prediction_length: minimum prediction/decoder length. Defaults to max_prediction_length
static_categoricals: list of categorical variables that do not change over time,
entries can be also lists which are then encoded together
(e.g. useful for product categories)
static_reals: list of continuous variables that do not change over time
time_varying_known_categoricals: list of categorical variables that change over
time and are know in the future, entries can be also lists which are then encoded together
(e.g. useful for special days or promotion categories)
time_varying_known_reals: list of continuous variables that change over
time and are know in the future
time_varying_unknown_categoricals: list of categorical variables that change over
time and are not know in the future, entries can be also lists which are then encoded together
(e.g. useful for weather categories)
time_varying_unknown_reals: list of continuous variables that change over
time and are not know in the future
variable_groups: dictionary mapping a name to a list of columns in the data. The name should be present
in a categorical or real class argument, to be able to encode or scale the columns by group.
dropout_categoricals: list of categorical variables that are unknown when making a forecast without
observed history
constant_fill_strategy: dictionary of column names with constants to fill in missing values if there are
gaps in the sequence (by default forward fill strategy is used). The values will be only used if
``allow_missings=True``. A common use case is to denote that demand was 0 if the sample is not in
the dataset.
allow_missings: if to allow missing timesteps that are automatically filled up. Missing values
refer to gaps in the ``time_idx``, e.g. if a specific timeseries has only samples for
1, 2, 4, 5, the sample for 3 will be generated on-the-fly.
Allow missings does not deal with ``NA`` values. You should fill NA values before
passing the dataframe to the TimeSeriesDataSet.
add_relative_time_idx: if to add a relative time index as feature (i.e. for each sampled sequence, the index
will range from -encoder_length to prediction_length)
add_target_scales: if to add scales for target to static real features (i.e. add the center and scale
of the unnormalized timeseries as features)
add_encoder_length: if to add decoder length to list of static real variables. Defaults to "auto",
i.e. yes if ``min_encoder_length != max_encoder_length``.
target_normalizer: transformer that takes group_ids, target and time_idx to return normalized targets.
You can choose from :py:class:`~TorchNormalizer`, :py:class:`~NaNLabelEncoder`,
:py:class:`~EncoderNormalizer` or `None` for using not normalizer.
By default an appropriate normalizer is chosen automatically.
categorical_encoders: dictionary of scikit learn label transformers. If you have unobserved categories in
the future, you can use the :py:class:`~pytorch_forecasting.encoders.NaNLabelEncoder` with
``add_nan=True``. Defaults effectively to sklearn's ``LabelEncoder()``. Prefittet encoders will not
be fit again.
scalers: dictionary of scikit learn scalers. Defaults to sklearn's ``StandardScaler()``.
Other options are :py:class:`~pytorch_forecasting.data.encoders.EncoderNormalizer`,
:py:class:`~pytorch_forecasting.data.encoders.GroupNormalizer` or scikit-learn's ``StandarScaler()``,
``RobustScaler()`` or `None` for using not normalizer.
Prefittet encoders will not be fit again (with the exception of the
:py:class:`~pytorch_forecasting.data.encoders.EncoderNormalizer`).
randomize_length: None or False if not to randomize lengths. Tuple of beta distribution concentrations
from which
probabilities are sampled that are used to sample new sequence lengths with a binomial
distribution.
If True, defaults to (0.2, 0.05), i.e. ~1/4 of samples around minimum encoder length.
Defaults to False otherwise.
predict_mode: if to only iterate over each timeseries once (only the last provided samples).
                Effectively, this will choose for each time series identified by ``group_ids``
                the last ``max_prediction_length`` samples of each time series as
                prediction samples and everything previous up to ``max_encoder_length`` samples as encoder samples.
"""
super().__init__()
self.max_encoder_length = max_encoder_length
assert isinstance(self.max_encoder_length, int), (
"max encoder length must be integer"
)
if min_encoder_length is None:
min_encoder_length = max_encoder_length
self.min_encoder_length = min_encoder_length
assert self.min_encoder_length <= self.max_encoder_length, (
"max encoder length has to be larger equals min encoder length"
)
assert isinstance(self.min_encoder_length, int), (
"min encoder length must be integer"
)
self.max_prediction_length = max_prediction_length
assert isinstance(self.max_prediction_length, int), (
"max prediction length must be integer"
)
if min_prediction_length is None:
min_prediction_length = max_prediction_length
self.min_prediction_length = min_prediction_length
assert self.min_prediction_length <= self.max_prediction_length, (
"max prediction length has to be larger equals min prediction length"
)
assert self.min_prediction_length > 0, "min prediction length must be larger than 0"
assert isinstance(self.min_prediction_length, int), (
"min prediction length must be integer"
)
self.target = target
self.weight = weight
self.time_idx = time_idx
self.group_ids = [] + group_ids
self.static_categoricals = [] + static_categoricals
self.static_reals = [] + static_reals
self.time_varying_known_categoricals = [] + time_varying_known_categoricals
self.time_varying_known_reals = [] + time_varying_known_reals
self.time_varying_unknown_categoricals = [] + time_varying_unknown_categoricals
self.time_varying_unknown_reals = [] + time_varying_unknown_reals
self.dropout_categoricals = [] + dropout_categoricals
self.add_relative_time_idx = add_relative_time_idx
# set automatic defaults
if isinstance(randomize_length, bool):
if not randomize_length:
randomize_length = None
else:
randomize_length = (0.2, 0.05)
self.randomize_length = randomize_length
if min_prediction_idx is None:
min_prediction_idx = data[self.time_idx].min()
self.min_prediction_idx = min_prediction_idx
self.constant_fill_strategy = (
{} if len(constant_fill_strategy) == 0 else constant_fill_strategy
)
self.predict_mode = predict_mode
self.allow_missings = allow_missings
self.target_normalizer = target_normalizer
self.categorical_encoders = (
{} if len(categorical_encoders) == 0 else categorical_encoders
)
self.scalers = {} if len(scalers) == 0 else scalers
self.add_target_scales = add_target_scales
self.variable_groups = {} if len(variable_groups) == 0 else variable_groups
# add_encoder_length
if isinstance(add_encoder_length, str):
assert add_encoder_length == "auto", (
f"Only 'auto' allowed for add_encoder_length but found {add_encoder_length}"
)
add_encoder_length = self.min_encoder_length != self.max_encoder_length
assert isinstance(add_encoder_length, bool), (
f"add_encoder_length should be boolean or 'auto' but found {add_encoder_length}"
)
self.add_encoder_length = add_encoder_length
# target normalizer
self._set_target_normalizer(data)
# overwrite values
self.reset_overwrite_values()
for target in self.target_names:
assert target not in self.time_varying_known_reals, (
f"target {target} should be an unknown continuous variable in the future"
)
# set data
assert data.index.is_unique, "data index has to be unique"
if min_prediction_idx is not None:
data = data[
lambda x: data[self.time_idx]
>= self.min_prediction_idx - self.max_encoder_length
]
data = data.sort_values(self.group_ids + [self.time_idx])
# add time index relative to prediction position
if self.add_relative_time_idx:
assert "relative_time_idx" not in data.columns, (
"relative_time_idx is a protected column and must not be present in data"
)
if (
"relative_time_idx" not in self.time_varying_known_reals
and "relative_time_idx" not in self.reals
):
self.time_varying_known_reals.append("relative_time_idx")
data["relative_time_idx"] = (
0.0 # dummy - real value will be set dynamiclly in __getitem__()
)
# add decoder length to static real variables
if self.add_encoder_length:
assert "encoder_length" not in data.columns, (
"encoder_length is a protected column and must not be present in data"
)
if (
"encoder_length" not in self.time_varying_known_reals
and "encoder_length" not in self.reals
):
self.static_reals.append("encoder_length")
data["encoder_length"] = (
0 # dummy - real value will be set dynamiclly in __getitem__()
)
# validate
self._validate_data(data)
# preprocess data
data = self._preprocess_data(data)
# create index
self.index = self._construct_index(data, predict_mode=predict_mode)
# convert to torch tensor for high performance data loading later
self.data = self._data_to_tensors(data)
|
https://github.com/jdb78/pytorch-forecasting/issues/187
|
RuntimeError Traceback (most recent call last)
<ipython-input-26-a92b5627800b> in <module>
1 # find optimal learning rate
----> 2 res = trainer.tuner.lr_find(
3 tft,
4 train_dataloader=train_dataloader,
5 val_dataloaders=val_dataloader,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
118 datamodule: Optional[LightningDataModule] = None
119 ):
--> 120 return lr_find(
121 self.trainer,
122 model,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
167
168 # Fit, lr & loss logged in callback
--> 169 trainer.fit(model,
170 train_dataloader=train_dataloader,
171 val_dataloaders=val_dataloaders,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
444 self.call_hook('on_fit_start')
445
--> 446 results = self.accelerator_backend.train()
447 self.accelerator_backend.teardown()
448
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\cpu_accelerator.py in train(self)
57
58 # train or test
---> 59 results = self.train_or_test()
60 return results
61
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in train(self)
493
494 # run train epoch
--> 495 self.train_loop.run_training_epoch()
496
497 if self.max_steps and self.max_steps <= self.global_step:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_epoch(self)
559 # TRAINING_STEP + TRAINING_STEP_END
560 # ------------------------------------
--> 561 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
562
563 # when returning -1 from train_step, we end epoch early
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
726
727 # optimizer step
--> 728 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
729
730 else:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure, *args, **kwargs)
467 with self.trainer.profiler.profile("optimizer_step"):
468 # optimizer step lightningModule hook
--> 469 self.trainer.accelerator_backend.optimizer_step(
470 optimizer, batch_idx, opt_idx, train_step_and_backward_closure, *args, **kwargs
471 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure, *args, **kwargs)
112
113 # model hook
--> 114 model_ref.optimizer_step(
115 epoch=self.trainer.current_epoch,
116 batch_idx=batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs, *args, **kwargs)
1378 optimizer.step(*args, **kwargs)
1379 else:
-> 1380 optimizer.step(closure=optimizer_closure, *args, **kwargs)
1381
1382 def optimizer_zero_grad(
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\optim\lr_scheduler.py in wrapper(*args, **kwargs)
65 instance._step_count += 1
66 wrapped = func.__get__(instance, cls)
---> 67 return wrapped(*args, **kwargs)
68
69 # Note that the returned function here is no longer a bound method,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_forecasting\optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in train_step_and_backward_closure()
716
717 def train_step_and_backward_closure():
--> 718 result = self.training_step_and_backward(
719 split_batch,
720 batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
821 # backward pass
822 with self.trainer.profiler.profile("model_backward"):
--> 823 self.backward(result, optimizer, opt_idx)
824
825 # hook - call this hook only
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
841 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
842 else:
--> 843 result.closure_loss = self.trainer.accelerator_backend.backward(
844 result.closure_loss, optimizer, opt_idx, *args, **kwargs
845 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
93 # do backward pass
94 model = self.trainer.get_model()
---> 95 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
96
97 # once backward has been applied, release graph
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1256 """
1257 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1258 loss.backward(*args, **kwargs)
1259
1260 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def _preprocess_data(self, data: pd.DataFrame) -> pd.DataFrame:
        """
        Scale continuous variables, encode categories and set aside target and weight.

        The passed frame is modified in place: group ids and categoricals are
        label-encoded, continuous variables are scaled, and the protected columns
        ``__time_idx__``, ``__target__<target>`` and (if a weight column is
        configured) ``__weight__`` are added before the same frame is returned.

        Args:
            data (pd.DataFrame): original data

        Returns:
            pd.DataFrame: pre-processed dataframe
        """
        # encode group ids - one fresh NaNLabelEncoder per group id column,
        # always refit here (user-supplied encoders are not used for group ids)
        for name, group_name in self._group_ids_mapping.items():
            self.categorical_encoders[group_name] = NaNLabelEncoder().fit(
                data[name].to_numpy().reshape(-1)
            )
            data[group_name] = self.transform_values(
                name, data[name], inverse=False, group_id=True
            )
        # encode categoricals
        if isinstance(
            self.target_normalizer, (GroupNormalizer, MultiNormalizer)
        ): # if we use a group normalizer, group_ids must be encoded as well
            group_ids_to_encode = self.group_ids
        else:
            group_ids_to_encode = []
        # fit categorical encoders - user-supplied encoders are respected:
        # `None` means "do not encode", already-fitted encoders are kept as is
        for name in set(group_ids_to_encode + self.categoricals):
            allow_nans = name in self.dropout_categoricals
            if name in self.variable_groups: # fit groups
                # variable group: one shared encoder fit over all member columns
                columns = self.variable_groups[name]
                if name not in self.categorical_encoders:
                    self.categorical_encoders[name] = NaNLabelEncoder(
                        add_nan=allow_nans
                    ).fit(data[columns].to_numpy().reshape(-1))
                elif self.categorical_encoders[name] is not None:
                    try:
                        check_is_fitted(self.categorical_encoders[name])
                    except NotFittedError:
                        self.categorical_encoders[name] = self.categorical_encoders[
                            name
                        ].fit(data[columns].to_numpy().reshape(-1))
            else:
                if name not in self.categorical_encoders:
                    self.categorical_encoders[name] = NaNLabelEncoder(
                        add_nan=allow_nans
                    ).fit(data[name])
                elif (
                    self.categorical_encoders[name] is not None
                    and name not in self.target_names
                ):
                    try:
                        check_is_fitted(self.categorical_encoders[name])
                    except NotFittedError:
                        self.categorical_encoders[name] = self.categorical_encoders[
                            name
                        ].fit(data[name])
        # encode them (target columns are handled below with the target normalizer)
        for name in set(group_ids_to_encode + self.flat_categoricals):
            if name not in self.target_names:
                data[name] = self.transform_values(name, data[name], inverse=False)
        # save special variables in protected columns so later stages can read
        # them independent of any scaling applied to the original columns
        assert "__time_idx__" not in data.columns, (
            "__time_idx__ is a protected column and must not be present in data"
        )
        data["__time_idx__"] = data[self.time_idx] # save unscaled
        for target in self.target_names:
            assert f"__target__{target}" not in data.columns, (
                f"__target__{target} is a protected column and must not be present in data"
            )
            data[f"__target__{target}"] = data[target]
        if self.weight is not None:
            data["__weight__"] = data[self.weight]
        # train target normalizer
        if self.target_normalizer is not None:
            # fit target normalizer (skipped for an already-fitted normalizer)
            try:
                check_is_fitted(self.target_normalizer)
            except NotFittedError:
                if isinstance(self.target_normalizer, EncoderNormalizer):
                    self.target_normalizer.fit(data[self.target])
                elif isinstance(self.target_normalizer, (GroupNormalizer, MultiNormalizer)):
                    self.target_normalizer.fit(data[self.target], data)
                else:
                    self.target_normalizer.fit(data[self.target])
            # transform target
            if isinstance(self.target_normalizer, EncoderNormalizer):
                # we approximate the scales and target transformation by assuming one
                # transformation over the entire time range but by each group
                # (a GroupNormalizer is built with the init args shared by both classes)
                common_init_args = [
                    name
                    for name in inspect.signature(
                        GroupNormalizer.__init__
                    ).parameters.keys()
                    if name
                    in inspect.signature(EncoderNormalizer.__init__).parameters.keys()
                    and name not in ["data", "self"]
                ]
                copy_kwargs = {
                    name: getattr(self.target_normalizer, name) for name in common_init_args
                }
                normalizer = GroupNormalizer(groups=self.group_ids, **copy_kwargs)
                data[self.target], scales = normalizer.fit_transform(
                    data[self.target], data, return_norm=True
                )
            elif isinstance(self.target_normalizer, GroupNormalizer):
                data[self.target], scales = self.target_normalizer.transform(
                    data[self.target], data, return_norm=True
                )
            elif isinstance(self.target_normalizer, MultiNormalizer):
                # multi-target: each sub-normalizer transforms its own target column
                transformed, scales = self.target_normalizer.transform(
                    data[self.target], data, return_norm=True
                )
                for idx, target in enumerate(self.target_names):
                    data[target] = transformed[idx]
                    if isinstance(self.target_normalizer[idx], NaNLabelEncoder):
                        # overwrite target because it requires encoding (continuous targets should not be normalized)
                        data[f"__target__{target}"] = data[target]
            elif isinstance(self.target_normalizer, NaNLabelEncoder):
                data[self.target] = self.target_normalizer.transform(data[self.target])
                # overwrite target because it requires encoding (continuous targets should not be normalized)
                data[f"__target__{self.target}"] = data[self.target]
                scales = None
            else:
                data[self.target], scales = self.target_normalizer.transform(
                    data[self.target], return_norm=True
                )
            # add target scales: per-row center/scale become static real features
            if self.add_target_scales:
                if not isinstance(self.target_normalizer, MultiNormalizer):
                    scales = [scales]
                for target_idx, target in enumerate(self.target_names):
                    # NaNLabelEncoder targets have no center/scale (scales is None)
                    if not isinstance(self.target_normalizers[target_idx], NaNLabelEncoder):
                        for scale_idx, name in enumerate(["center", "scale"]):
                            feature_name = f"{target}_{name}"
                            assert feature_name not in data.columns, (
                                f"{feature_name} is a protected column and must not be present in data"
                            )
                            data[feature_name] = scales[target_idx][:, scale_idx].squeeze()
                            if feature_name not in self.reals:
                                self.static_reals.append(feature_name)
        # rescale continuous variables apart from target
        for name in self.reals:
            if name in self.target_names:
                continue
            # default scaler when none supplied; otherwise fit the user's scaler
            # only if it is not already fitted; `None` means "do not scale"
            if name not in self.scalers:
                self.scalers[name] = StandardScaler().fit(data[[name]])
            elif self.scalers[name] is not None:
                try:
                    check_is_fitted(self.scalers[name])
                except NotFittedError:
                    if isinstance(self.scalers[name], GroupNormalizer):
                        self.scalers[name] = self.scalers[name].fit(data[[name]], data)
                    else:
                        self.scalers[name] = self.scalers[name].fit(data[[name]])
            # EncoderNormalizer scalers are deliberately excluded from the global
            # transform here (presumably applied per sequence later - TODO confirm)
            if self.scalers[name] is not None and not isinstance(
                self.scalers[name], EncoderNormalizer
            ):
                data[name] = self.transform_values(
                    name, data[name], data=data, inverse=False
                )
        # encode constant values so fills later happen in transformed space;
        # target fill values are additionally kept raw under __target__<name>
        self.encoded_constant_fill_strategy = {}
        for name, value in self.constant_fill_strategy.items():
            if name in self.target_names:
                self.encoded_constant_fill_strategy[f"__target__{name}"] = value
            self.encoded_constant_fill_strategy[name] = self.transform_values(
                name, np.array([value]), data=data, inverse=False
            )[0]
        return data
|
def _preprocess_data(self, data: pd.DataFrame) -> pd.DataFrame:
        """
        Scale continuous variables, encode categories and set aside target and weight.

        The passed frame is modified in place: group ids and categoricals are
        label-encoded, continuous variables are scaled, and the protected columns
        ``__time_idx__``, ``__target__<target>`` and (if a weight column is
        configured) ``__weight__`` are added before the same frame is returned.

        Args:
            data (pd.DataFrame): original data

        Returns:
            pd.DataFrame: pre-processed dataframe
        """
        # encode group ids - one fresh NaNLabelEncoder per group id column
        for name, group_name in self._group_ids_mapping.items():
            self.categorical_encoders[group_name] = NaNLabelEncoder().fit(
                data[name].to_numpy().reshape(-1)
            )
            data[group_name] = self.transform_values(
                name, data[name], inverse=False, group_id=True
            )
        # encode categoricals
        if isinstance(
            self.target_normalizer, (GroupNormalizer, MultiNormalizer)
        ):  # if we use a group normalizer, group_ids must be encoded as well
            group_ids_to_encode = self.group_ids
        else:
            group_ids_to_encode = []
        # fit categorical encoders - user-supplied encoders are respected:
        # `None` means "do not encode", already-fitted encoders are kept as is
        for name in set(group_ids_to_encode + self.categoricals):
            allow_nans = name in self.dropout_categoricals
            if name in self.variable_groups:  # fit groups
                columns = self.variable_groups[name]
                if name not in self.categorical_encoders:
                    self.categorical_encoders[name] = NaNLabelEncoder(
                        add_nan=allow_nans
                    ).fit(data[columns].to_numpy().reshape(-1))
                elif self.categorical_encoders[name] is not None:
                    try:
                        check_is_fitted(self.categorical_encoders[name])
                    except NotFittedError:
                        self.categorical_encoders[name] = self.categorical_encoders[
                            name
                        ].fit(data[columns].to_numpy().reshape(-1))
            else:
                if name not in self.categorical_encoders:
                    self.categorical_encoders[name] = NaNLabelEncoder(
                        add_nan=allow_nans
                    ).fit(data[name])
                elif (
                    self.categorical_encoders[name] is not None
                    and name not in self.target_names
                ):
                    try:
                        check_is_fitted(self.categorical_encoders[name])
                    except NotFittedError:
                        self.categorical_encoders[name] = self.categorical_encoders[
                            name
                        ].fit(data[name])
        # encode them (target columns are handled below with the target normalizer)
        for name in set(group_ids_to_encode + self.flat_categoricals):
            if name not in self.target_names:
                data[name] = self.transform_values(name, data[name], inverse=False)
        # save special variables in protected columns (kept unscaled)
        assert "__time_idx__" not in data.columns, (
            "__time_idx__ is a protected column and must not be present in data"
        )
        data["__time_idx__"] = data[self.time_idx]  # save unscaled
        for target in self.target_names:
            assert f"__target__{target}" not in data.columns, (
                f"__target__{target} is a protected column and must not be present in data"
            )
            data[f"__target__{target}"] = data[target]
        if self.weight is not None:
            data["__weight__"] = data[self.weight]
        # train target normalizer
        if self.target_normalizer is not None:
            # fit target normalizer (skipped for an already-fitted normalizer)
            try:
                check_is_fitted(self.target_normalizer)
            except NotFittedError:
                if isinstance(self.target_normalizer, EncoderNormalizer):
                    self.target_normalizer.fit(data[self.target])
                elif isinstance(self.target_normalizer, (GroupNormalizer, MultiNormalizer)):
                    self.target_normalizer.fit(data[self.target], data)
                else:
                    self.target_normalizer.fit(data[self.target])
            # transform target
            if isinstance(self.target_normalizer, EncoderNormalizer):
                # we approximate the scales and target transformation by assuming one
                # transformation over the entire time range but by each group
                common_init_args = [
                    name
                    for name in inspect.signature(
                        GroupNormalizer.__init__
                    ).parameters.keys()
                    if name
                    in inspect.signature(EncoderNormalizer.__init__).parameters.keys()
                    and name not in ["data", "self"]
                ]
                copy_kwargs = {
                    name: getattr(self.target_normalizer, name) for name in common_init_args
                }
                normalizer = GroupNormalizer(groups=self.group_ids, **copy_kwargs)
                data[self.target], scales = normalizer.fit_transform(
                    data[self.target], data, return_norm=True
                )
            elif isinstance(self.target_normalizer, GroupNormalizer):
                data[self.target], scales = self.target_normalizer.transform(
                    data[self.target], data, return_norm=True
                )
            elif isinstance(self.target_normalizer, MultiNormalizer):
                transformed, scales = self.target_normalizer.transform(
                    data[self.target], data, return_norm=True
                )
                for idx, target in enumerate(self.target_names):
                    data[target] = transformed[idx]
                    # BUGFIX: categorical sub-targets must be re-saved encoded,
                    # mirroring the plain NaNLabelEncoder branch below - otherwise
                    # __target__<name> keeps raw labels and later tensor
                    # conversion of the target fails
                    if isinstance(self.target_normalizer[idx], NaNLabelEncoder):
                        # overwrite target because it requires encoding (continuous targets should not be normalized)
                        data[f"__target__{target}"] = data[target]
            elif isinstance(self.target_normalizer, NaNLabelEncoder):
                data[self.target] = self.target_normalizer.transform(data[self.target])
                # overwrite target because it requires encoding (continuous targets should not be normalized)
                data[f"__target__{self.target}"] = data[self.target]
                scales = None
            else:
                data[self.target], scales = self.target_normalizer.transform(
                    data[self.target], return_norm=True
                )
            # add target scales: per-row center/scale become static real features
            if self.add_target_scales:
                if not isinstance(self.target_normalizer, MultiNormalizer):
                    scales = [scales]
                for target_idx, target in enumerate(self.target_names):
                    # NaNLabelEncoder targets have no center/scale
                    if not isinstance(self.target_normalizers[target_idx], NaNLabelEncoder):
                        for scale_idx, name in enumerate(["center", "scale"]):
                            feature_name = f"{target}_{name}"
                            assert feature_name not in data.columns, (
                                f"{feature_name} is a protected column and must not be present in data"
                            )
                            data[feature_name] = scales[target_idx][:, scale_idx].squeeze()
                            if feature_name not in self.reals:
                                self.static_reals.append(feature_name)
        # rescale continuous variables apart from target
        for name in self.reals:
            if name in self.target_names:
                continue
            if name not in self.scalers:
                self.scalers[name] = StandardScaler().fit(data[[name]])
            elif self.scalers[name] is not None:
                try:
                    check_is_fitted(self.scalers[name])
                except NotFittedError:
                    if isinstance(self.scalers[name], GroupNormalizer):
                        self.scalers[name] = self.scalers[name].fit(data[[name]], data)
                    else:
                        self.scalers[name] = self.scalers[name].fit(data[[name]])
            # EncoderNormalizer scalers are deliberately excluded from the
            # global transform here (presumably applied per sequence later)
            if self.scalers[name] is not None and not isinstance(
                self.scalers[name], EncoderNormalizer
            ):
                data[name] = self.transform_values(
                    name, data[name], data=data, inverse=False
                )
        # encode constant values so fills later happen in transformed space;
        # target fill values are additionally kept raw under __target__<name>
        self.encoded_constant_fill_strategy = {}
        for name, value in self.constant_fill_strategy.items():
            if name in self.target_names:
                self.encoded_constant_fill_strategy[f"__target__{name}"] = value
            self.encoded_constant_fill_strategy[name] = self.transform_values(
                name, np.array([value]), data=data, inverse=False
            )[0]
        return data
|
https://github.com/jdb78/pytorch-forecasting/issues/187
|
RuntimeError Traceback (most recent call last)
<ipython-input-26-a92b5627800b> in <module>
1 # find optimal learning rate
----> 2 res = trainer.tuner.lr_find(
3 tft,
4 train_dataloader=train_dataloader,
5 val_dataloaders=val_dataloader,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
118 datamodule: Optional[LightningDataModule] = None
119 ):
--> 120 return lr_find(
121 self.trainer,
122 model,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
167
168 # Fit, lr & loss logged in callback
--> 169 trainer.fit(model,
170 train_dataloader=train_dataloader,
171 val_dataloaders=val_dataloaders,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
444 self.call_hook('on_fit_start')
445
--> 446 results = self.accelerator_backend.train()
447 self.accelerator_backend.teardown()
448
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\cpu_accelerator.py in train(self)
57
58 # train or test
---> 59 results = self.train_or_test()
60 return results
61
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in train(self)
493
494 # run train epoch
--> 495 self.train_loop.run_training_epoch()
496
497 if self.max_steps and self.max_steps <= self.global_step:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_epoch(self)
559 # TRAINING_STEP + TRAINING_STEP_END
560 # ------------------------------------
--> 561 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
562
563 # when returning -1 from train_step, we end epoch early
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
726
727 # optimizer step
--> 728 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
729
730 else:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure, *args, **kwargs)
467 with self.trainer.profiler.profile("optimizer_step"):
468 # optimizer step lightningModule hook
--> 469 self.trainer.accelerator_backend.optimizer_step(
470 optimizer, batch_idx, opt_idx, train_step_and_backward_closure, *args, **kwargs
471 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure, *args, **kwargs)
112
113 # model hook
--> 114 model_ref.optimizer_step(
115 epoch=self.trainer.current_epoch,
116 batch_idx=batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs, *args, **kwargs)
1378 optimizer.step(*args, **kwargs)
1379 else:
-> 1380 optimizer.step(closure=optimizer_closure, *args, **kwargs)
1381
1382 def optimizer_zero_grad(
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\optim\lr_scheduler.py in wrapper(*args, **kwargs)
65 instance._step_count += 1
66 wrapped = func.__get__(instance, cls)
---> 67 return wrapped(*args, **kwargs)
68
69 # Note that the returned function here is no longer a bound method,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_forecasting\optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in train_step_and_backward_closure()
716
717 def train_step_and_backward_closure():
--> 718 result = self.training_step_and_backward(
719 split_batch,
720 batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
821 # backward pass
822 with self.trainer.profiler.profile("model_backward"):
--> 823 self.backward(result, optimizer, opt_idx)
824
825 # hook - call this hook only
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
841 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
842 else:
--> 843 result.closure_loss = self.trainer.accelerator_backend.backward(
844 result.closure_loss, optimizer, opt_idx, *args, **kwargs
845 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
93 # do backward pass
94 model = self.trainer.get_model()
---> 95 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
96
97 # once backward has been applied, release graph
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1256 """
1257 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1258 loss.backward(*args, **kwargs)
1259
1260 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def _data_to_tensors(self, data: pd.DataFrame) -> Dict[str, torch.Tensor]:
"""
Convert data to tensors for faster access with :py:meth:`~__getitem__`.
Args:
data (pd.DataFrame): preprocessed data
Returns:
Dict[str, torch.Tensor]: dictionary of tensors for continous, categorical data, groups, target and
time index
"""
index = check_for_nonfinite(
torch.tensor(data[self._group_ids].to_numpy(np.long), dtype=torch.long),
self.group_ids,
)
time = check_for_nonfinite(
torch.tensor(data["__time_idx__"].to_numpy(np.long), dtype=torch.long),
self.time_idx,
)
# categorical covariates
categorical = check_for_nonfinite(
torch.tensor(data[self.flat_categoricals].to_numpy(np.long), dtype=torch.long),
self.flat_categoricals,
)
# get weight
if self.weight is not None:
weight = check_for_nonfinite(
torch.tensor(
data["__weight__"].to_numpy(dtype=np.float),
dtype=torch.float,
),
self.weight,
)
else:
weight = None
# get target
if isinstance(self.target_normalizer, NaNLabelEncoder):
target = [
check_for_nonfinite(
torch.tensor(
data[f"__target__{self.target}"].to_numpy(dtype=np.long),
dtype=torch.long,
),
self.target,
)
]
else:
if not isinstance(self.target, str): # multi-target
target = [
check_for_nonfinite(
torch.tensor(
data[f"__target__{name}"].to_numpy(
dtype=[np.float, np.long][data[name].dtype.kind in "bi"]
),
dtype=[torch.float, torch.long][data[name].dtype.kind in "bi"],
),
name,
)
for name in self.target_names
]
else:
target = [
check_for_nonfinite(
torch.tensor(
data[f"__target__{self.target}"].to_numpy(dtype=np.float),
dtype=torch.float,
),
self.target,
)
]
# continuous covariates
continuous = check_for_nonfinite(
torch.tensor(data[self.reals].to_numpy(dtype=np.float), dtype=torch.float),
self.reals,
)
tensors = dict(
reals=continuous,
categoricals=categorical,
groups=index,
target=target,
weight=weight,
time=time,
)
return tensors
|
def _data_to_tensors(self, data: pd.DataFrame) -> Dict[str, torch.Tensor]:
"""
Convert data to tensors for faster access with :py:meth:`~__getitem__`.
Args:
data (pd.DataFrame): preprocessed data
Returns:
Dict[str, torch.Tensor]: dictionary of tensors for continous, categorical data, groups, target and
time index
"""
index = torch.tensor(data[self._group_ids].to_numpy(np.long), dtype=torch.long)
time = torch.tensor(data["__time_idx__"].to_numpy(np.long), dtype=torch.long)
# categorical covariates
categorical = torch.tensor(
data[self.flat_categoricals].to_numpy(np.long), dtype=torch.long
)
# get weight
if self.weight is not None:
weight = torch.tensor(
data["__weight__"].to_numpy(dtype=np.float),
dtype=torch.float,
)
else:
weight = None
# get target
if isinstance(self.target_normalizer, NaNLabelEncoder):
target = [
torch.tensor(
data[f"__target__{self.target}"].to_numpy(dtype=np.long),
dtype=torch.long,
)
]
else:
if len(self.target_names) > 1:
target = [
torch.tensor(
data[name].to_numpy(
dtype=[np.float, np.long][data[name].dtype.kind in "bi"]
),
dtype=[torch.float, torch.long][data[name].dtype.kind in "bi"],
)
for name in self.target_names
]
else:
target = [
torch.tensor(
data[f"__target__{self.target}"].to_numpy(dtype=np.float),
dtype=torch.float,
)
]
# continuous covariates
continuous = torch.tensor(
data[self.reals].to_numpy(dtype=np.float), dtype=torch.float
)
tensors = dict(
reals=continuous,
categoricals=categorical,
groups=index,
target=target,
weight=weight,
time=time,
)
return tensors
|
https://github.com/jdb78/pytorch-forecasting/issues/187
|
RuntimeError Traceback (most recent call last)
<ipython-input-26-a92b5627800b> in <module>
1 # find optimal learning rate
----> 2 res = trainer.tuner.lr_find(
3 tft,
4 train_dataloader=train_dataloader,
5 val_dataloaders=val_dataloader,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
118 datamodule: Optional[LightningDataModule] = None
119 ):
--> 120 return lr_find(
121 self.trainer,
122 model,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
167
168 # Fit, lr & loss logged in callback
--> 169 trainer.fit(model,
170 train_dataloader=train_dataloader,
171 val_dataloaders=val_dataloaders,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
444 self.call_hook('on_fit_start')
445
--> 446 results = self.accelerator_backend.train()
447 self.accelerator_backend.teardown()
448
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\cpu_accelerator.py in train(self)
57
58 # train or test
---> 59 results = self.train_or_test()
60 return results
61
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in train(self)
493
494 # run train epoch
--> 495 self.train_loop.run_training_epoch()
496
497 if self.max_steps and self.max_steps <= self.global_step:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_epoch(self)
559 # TRAINING_STEP + TRAINING_STEP_END
560 # ------------------------------------
--> 561 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
562
563 # when returning -1 from train_step, we end epoch early
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
726
727 # optimizer step
--> 728 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
729
730 else:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure, *args, **kwargs)
467 with self.trainer.profiler.profile("optimizer_step"):
468 # optimizer step lightningModule hook
--> 469 self.trainer.accelerator_backend.optimizer_step(
470 optimizer, batch_idx, opt_idx, train_step_and_backward_closure, *args, **kwargs
471 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure, *args, **kwargs)
112
113 # model hook
--> 114 model_ref.optimizer_step(
115 epoch=self.trainer.current_epoch,
116 batch_idx=batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs, *args, **kwargs)
1378 optimizer.step(*args, **kwargs)
1379 else:
-> 1380 optimizer.step(closure=optimizer_closure, *args, **kwargs)
1381
1382 def optimizer_zero_grad(
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\optim\lr_scheduler.py in wrapper(*args, **kwargs)
65 instance._step_count += 1
66 wrapped = func.__get__(instance, cls)
---> 67 return wrapped(*args, **kwargs)
68
69 # Note that the returned function here is no longer a bound method,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_forecasting\optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in train_step_and_backward_closure()
716
717 def train_step_and_backward_closure():
--> 718 result = self.training_step_and_backward(
719 split_batch,
720 batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
821 # backward pass
822 with self.trainer.profiler.profile("model_backward"):
--> 823 self.backward(result, optimizer, opt_idx)
824
825 # hook - call this hook only
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
841 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
842 else:
--> 843 result.closure_loss = self.trainer.accelerator_backend.backward(
844 result.closure_loss, optimizer, opt_idx, *args, **kwargs
845 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
93 # do backward pass
94 model = self.trainer.get_model()
---> 95 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
96
97 # once backward has been applied, release graph
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1256 """
1257 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1258 loss.backward(*args, **kwargs)
1259
1260 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def autocorrelation(input, dim=0):
"""
Computes the autocorrelation of samples at dimension ``dim``.
Reference: https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation
Implementation copied form `pyro <https://github.com/pyro-ppl/pyro/blob/dev/pyro/ops/stats.py>`_.
:param torch.Tensor input: the input tensor.
:param int dim: the dimension to calculate autocorrelation.
:returns torch.Tensor: autocorrelation of ``input``.
"""
# Adapted from Stan implementation
# https://github.com/stan-dev/math/blob/develop/stan/math/prim/mat/fun/autocorrelation.hpp
N = input.size(dim)
M = next_fast_len(N)
M2 = 2 * M
# transpose dim with -1 for Fourier transform
input = input.transpose(dim, -1)
# centering and padding x
centered_signal = input - input.mean(dim=-1, keepdim=True)
# Fourier transform
freqvec = torch.view_as_real(rfft(centered_signal, n=M2))
# take square of magnitude of freqvec (or freqvec x freqvec*)
freqvec_gram = freqvec.pow(2).sum(-1)
# inverse Fourier transform
autocorr = irfft(freqvec_gram, n=M2)
# truncate and normalize the result, then transpose back to original shape
autocorr = autocorr[..., :N]
autocorr = autocorr / torch.tensor(
range(N, 0, -1), dtype=input.dtype, device=input.device
)
autocorr = autocorr / autocorr[..., :1]
return autocorr.transpose(dim, -1)
|
def autocorrelation(input, dim=0):
"""
Computes the autocorrelation of samples at dimension ``dim``.
Reference: https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation
Implementation copied form ``pyro``.
:param torch.Tensor input: the input tensor.
:param int dim: the dimension to calculate autocorrelation.
:returns torch.Tensor: autocorrelation of ``input``.
"""
if (not input.is_cuda) and (not torch.backends.mkl.is_available()):
raise NotImplementedError(
"For CPU tensor, this method is only supported with MKL installed."
)
# Adapted from Stan implementation
# https://github.com/stan-dev/math/blob/develop/stan/math/prim/mat/fun/autocorrelation.hpp
N = input.size(dim)
M = next_fast_len(N)
M2 = 2 * M
# transpose dim with -1 for Fourier transform
input = input.transpose(dim, -1)
# centering and padding x
centered_signal = input - input.mean(dim=-1, keepdim=True)
pad = torch.zeros(
input.shape[:-1] + (M2 - N,), dtype=input.dtype, device=input.device
)
centered_signal = torch.cat([centered_signal, pad], dim=-1)
# Fourier transform
freqvec = torch.rfft(centered_signal, signal_ndim=1, onesided=False)
# take square of magnitude of freqvec (or freqvec x freqvec*)
freqvec_gram = freqvec.pow(2).sum(-1, keepdim=True)
freqvec_gram = torch.cat(
[
freqvec_gram,
torch.zeros(freqvec_gram.shape, dtype=input.dtype, device=input.device),
],
dim=-1,
)
# inverse Fourier transform
autocorr = torch.irfft(freqvec_gram, signal_ndim=1, onesided=False)
# truncate and normalize the result, then transpose back to original shape
autocorr = autocorr[..., :N]
autocorr = autocorr / torch.tensor(
range(N, 0, -1), dtype=input.dtype, device=input.device
)
autocorr = autocorr / autocorr[..., :1]
return autocorr.transpose(dim, -1)
|
https://github.com/jdb78/pytorch-forecasting/issues/187
|
RuntimeError Traceback (most recent call last)
<ipython-input-26-a92b5627800b> in <module>
1 # find optimal learning rate
----> 2 res = trainer.tuner.lr_find(
3 tft,
4 train_dataloader=train_dataloader,
5 val_dataloaders=val_dataloader,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
118 datamodule: Optional[LightningDataModule] = None
119 ):
--> 120 return lr_find(
121 self.trainer,
122 model,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\tuner\lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
167
168 # Fit, lr & loss logged in callback
--> 169 trainer.fit(model,
170 train_dataloader=train_dataloader,
171 val_dataloaders=val_dataloaders,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
444 self.call_hook('on_fit_start')
445
--> 446 results = self.accelerator_backend.train()
447 self.accelerator_backend.teardown()
448
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\cpu_accelerator.py in train(self)
57
58 # train or test
---> 59 results = self.train_or_test()
60 return results
61
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\trainer.py in train(self)
493
494 # run train epoch
--> 495 self.train_loop.run_training_epoch()
496
497 if self.max_steps and self.max_steps <= self.global_step:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_epoch(self)
559 # TRAINING_STEP + TRAINING_STEP_END
560 # ------------------------------------
--> 561 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
562
563 # when returning -1 from train_step, we end epoch early
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
726
727 # optimizer step
--> 728 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
729
730 else:
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure, *args, **kwargs)
467 with self.trainer.profiler.profile("optimizer_step"):
468 # optimizer step lightningModule hook
--> 469 self.trainer.accelerator_backend.optimizer_step(
470 optimizer, batch_idx, opt_idx, train_step_and_backward_closure, *args, **kwargs
471 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in optimizer_step(self, optimizer, batch_idx, opt_idx, lambda_closure, *args, **kwargs)
112
113 # model hook
--> 114 model_ref.optimizer_step(
115 epoch=self.trainer.current_epoch,
116 batch_idx=batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs, *args, **kwargs)
1378 optimizer.step(*args, **kwargs)
1379 else:
-> 1380 optimizer.step(closure=optimizer_closure, *args, **kwargs)
1381
1382 def optimizer_zero_grad(
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\optim\lr_scheduler.py in wrapper(*args, **kwargs)
65 instance._step_count += 1
66 wrapped = func.__get__(instance, cls)
---> 67 return wrapped(*args, **kwargs)
68
69 # Note that the returned function here is no longer a bound method,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_forecasting\optim.py in step(self, closure)
129 closure: A closure that reevaluates the model and returns the loss.
130 """
--> 131 _ = closure()
132 loss = None
133 # note - below is commented out b/c I have other work that passes back
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in train_step_and_backward_closure()
716
717 def train_step_and_backward_closure():
--> 718 result = self.training_step_and_backward(
719 split_batch,
720 batch_idx,
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
821 # backward pass
822 with self.trainer.profiler.profile("model_backward"):
--> 823 self.backward(result, optimizer, opt_idx)
824
825 # hook - call this hook only
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\trainer\training_loop.py in backward(self, result, optimizer, opt_idx, *args, **kwargs)
841 self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
842 else:
--> 843 result.closure_loss = self.trainer.accelerator_backend.backward(
844 result.closure_loss, optimizer, opt_idx, *args, **kwargs
845 )
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\accelerators\accelerator.py in backward(self, closure_loss, optimizer, opt_idx, *args, **kwargs)
93 # do backward pass
94 model = self.trainer.get_model()
---> 95 model.backward(closure_loss, optimizer, opt_idx, *args, **kwargs)
96
97 # once backward has been applied, release graph
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\pytorch_lightning\core\lightning.py in backward(self, loss, optimizer, optimizer_idx, *args, **kwargs)
1256 """
1257 if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
-> 1258 loss.backward(*args, **kwargs)
1259
1260 def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\tensor.py in backward(self, gradient, retain_graph, create_graph)
219 retain_graph=retain_graph,
220 create_graph=create_graph)
--> 221 torch.autograd.backward(self, gradient, retain_graph, create_graph)
222
223 def register_hook(self, hook):
c:\users\u2\appdata\local\programs\python\python38\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
128 retain_graph = create_graph
129
--> 130 Variable._execution_engine.run_backward(
131 tensors, grad_tensors_, retain_graph, create_graph,
132 allow_unreachable=True) # allow_unreachable flag
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def __init__(
self,
method: str = "standard",
center: bool = True,
log_scale: Union[bool, float] = False,
log_zero_value: float = -np.inf,
coerce_positive: Union[float, bool] = None,
eps: float = 1e-8,
):
"""
Initialize
Args:
method (str, optional): method to rescale series. Either "standard" (standard scaling) or "robust"
(scale using quantiles 0.25-0.75). Defaults to "standard".
center (bool, optional): If to center the output to zero. Defaults to True.
log_scale (bool, optional): If to take log of values. Defaults to False. Defaults to False.
log_zero_value (float, optional): Value to map 0 to for ``log_scale=True`` or in softplus. Defaults to -inf.
coerce_positive (Union[bool, float, str], optional): If to coerce output to positive. Valid values:
* None, i.e. is automatically determined and might change to True if all values are >= 0 (Default).
* True, i.e. output is clamped at 0.
* False, i.e. values are not coerced
* float, i.e. softmax is applied with beta = coerce_positive.
eps (float, optional): Number for numerical stability of calcualtions.
Defaults to 1e-8. For count data, 1.0 is recommended.
"""
self.method = method
assert method in ["standard", "robust"], f"method has invalid value {method}"
self.center = center
self.eps = eps
# set log scale
self.log_zero_value = np.exp(log_zero_value)
self.log_scale = log_scale
# check if coerce positive should be determined automatically
if coerce_positive is None:
if log_scale:
coerce_positive = False
else:
assert not (self.log_scale and coerce_positive), (
"log scale means that output is transformed to a positive number by default while coercing positive"
" will apply softmax function - decide for either one or the other"
)
self.coerce_positive = coerce_positive
|
def __init__(
self,
method: str = "standard",
center: bool = True,
log_scale: Union[bool, float] = False,
log_zero_value: float = 0.0,
coerce_positive: Union[float, bool] = None,
eps: float = 1e-8,
):
"""
Initialize
Args:
method (str, optional): method to rescale series. Either "standard" (standard scaling) or "robust"
(scale using quantiles 0.25-0.75). Defaults to "standard".
center (bool, optional): If to center the output to zero. Defaults to True.
log_scale (bool, optional): If to take log of values. Defaults to False. Defaults to False.
log_zero_value (float, optional): Value to map 0 to for ``log_scale=True`` or in softplus. Defaults to 0.0
coerce_positive (Union[bool, float, str], optional): If to coerce output to positive. Valid values:
* None, i.e. is automatically determined and might change to True if all values are >= 0 (Default).
* True, i.e. output is clamped at 0.
* False, i.e. values are not coerced
* float, i.e. softmax is applied with beta = coerce_positive.
eps (float, optional): Number for numerical stability of calcualtions. Defaults to 1e-8.
"""
self.method = method
assert method in ["standard", "robust"], f"method has invalid value {method}"
self.center = center
self.eps = eps
# set log scale
self.log_zero_value = np.exp(log_zero_value)
self.log_scale = log_scale
# check if coerce positive should be determined automatically
if coerce_positive is None:
if log_scale:
coerce_positive = False
else:
assert not (self.log_scale and coerce_positive), (
"log scale means that output is transformed to a positive number by default while coercing positive"
" will apply softmax function - decide for either one or the other"
)
self.coerce_positive = coerce_positive
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def get_parameters(self, *args, **kwargs) -> torch.Tensor:
"""
Returns parameters that were used for encoding.
Returns:
torch.Tensor: First element is center of data and second is scale
"""
return torch.stack(
[torch.as_tensor(self.center_), torch.as_tensor(self.scale_)], dim=-1
)
|
def get_parameters(self, *args, **kwargs) -> torch.Tensor:
"""
Returns parameters that were used for encoding.
Returns:
torch.Tensor: First element is center of data and second is scale
"""
return torch.tensor([self.center_, self.scale_])
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def _preprocess_y(
self, y: Union[pd.Series, np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
"""
Preprocess input data (e.g. take log).
Can set coerce positive to a value if it was set to None and log_scale to False.
Returns:
Union[np.ndarray, torch.Tensor]: return rescaled series with type depending on input type
"""
if self.log_scale:
if isinstance(y, torch.Tensor):
y = torch.log(y + self.log_zero_value + self.eps)
else:
y = np.log(y + self.log_zero_value + self.eps)
return y
|
def _preprocess_y(
self, y: Union[pd.Series, np.ndarray, torch.Tensor]
) -> Union[np.ndarray, torch.Tensor]:
"""
Preprocess input data (e.g. take log).
Can set coerce positive to a value if it was set to None and log_scale to False.
Returns:
Union[np.ndarray, torch.Tensor]: return rescaled series with type depending on input type
"""
if self.coerce_positive is None and not self.log_scale:
self.coerce_positive = (y >= 0).all()
if self.log_scale:
if isinstance(y, torch.Tensor):
y = torch.log(y + self.log_zero_value)
else:
y = np.log(y + self.log_zero_value)
return y
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def fit(self, y: Union[pd.Series, np.ndarray, torch.Tensor]):
"""
Fit transformer, i.e. determine center and scale of data
Args:
y (Union[pd.Series, np.ndarray, torch.Tensor]): input data
Returns:
TorchNormalizer: self
"""
if self.coerce_positive is None and not self.log_scale:
self.coerce_positive = (y >= 0).all()
y = self._preprocess_y(y)
if self.method == "standard":
if isinstance(y, torch.Tensor):
self.center_ = torch.mean(y, dim=-1) + self.eps
self.scale_ = torch.std(y, dim=-1) + self.eps
elif isinstance(y, np.ndarray):
self.center_ = np.mean(y, axis=-1) + self.eps
self.scale_ = np.std(y, axis=-1) + self.eps
else:
self.center_ = np.mean(y) + self.eps
self.scale_ = np.std(y) + self.eps
elif self.method == "robust":
if isinstance(y, torch.Tensor):
self.center_ = torch.median(y, dim=-1).values + self.eps
q_75 = y.kthvalue(int(len(y) * 0.75), dim=-1).values
q_25 = y.kthvalue(int(len(y) * 0.25), dim=-1).values
elif isinstance(y, np.ndarray):
self.center_ = np.median(y, axis=-1) + self.eps
q_75 = np.percentiley(y, 75, axis=-1)
q_25 = np.percentiley(y, 25, axis=-1)
else:
self.center_ = np.median(y) + self.eps
q_75 = np.percentiley(y, 75)
q_25 = np.percentiley(y, 25)
self.scale_ = (q_75 - q_25) / 2.0 + self.eps
if not self.center:
self.scale_ = self.center_
if isinstance(y, torch.Tensor):
self.center_ = torch.zeros_like(self.center_)
else:
self.center_ = np.zeros_like(self.center_)
return self
|
def fit(self, y: Union[pd.Series, np.ndarray, torch.Tensor]):
"""
Fit transformer, i.e. determine center and scale of data
Args:
y (Union[pd.Series, np.ndarray, torch.Tensor]): input data
Returns:
TorchNormalizer: self
"""
y = self._preprocess_y(y)
if self.method == "standard":
if isinstance(y, torch.Tensor):
self.center_ = torch.mean(y)
self.scale_ = torch.std(y) / (self.center_ + self.eps)
else:
self.center_ = np.mean(y)
self.scale_ = np.std(y) / (self.center_ + self.eps)
elif self.method == "robust":
if isinstance(y, torch.Tensor):
self.center_ = torch.median(y)
q_75 = y.kthvalue(int(len(y) * 0.75)).values
q_25 = y.kthvalue(int(len(y) * 0.25)).values
else:
self.center_ = np.median(y)
q_75 = np.percentiley(y, 75)
q_25 = np.percentiley(y, 25)
self.scale_ = (q_75 - q_25) / (self.center_ + self.eps) / 2.0
return self
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def transform(
self,
y: Union[pd.Series, np.ndarray, torch.Tensor],
return_norm: bool = False,
target_scale: torch.Tensor = None,
) -> Union[
Tuple[Union[np.ndarray, torch.Tensor], np.ndarray], Union[np.ndarray, torch.Tensor]
]:
"""
Rescale data.
Args:
y (Union[pd.Series, np.ndarray, torch.Tensor]): input data
return_norm (bool, optional): [description]. Defaults to False.
target_scale (torch.Tensor): target scale to use instead of fitted center and scale
Returns:
Union[Tuple[Union[np.ndarray, torch.Tensor], np.ndarray], Union[np.ndarray, torch.Tensor]]: rescaled
data with type depending on input type. returns second element if ``return_norm=True``
"""
y = self._preprocess_y(y)
# get center and scale
if target_scale is None:
target_scale = self.get_parameters().numpy()[None, :]
center = target_scale[..., 0]
scale = target_scale[..., 1]
if y.ndim > center.ndim: # multiple batches -> expand size
center = center.view(*center.size(), *(1,) * (y.ndim - center.ndim))
scale = scale.view(*scale.size(), *(1,) * (y.ndim - scale.ndim))
# transform
y = (y - center) / scale
# return with center and scale or without
if return_norm:
return y, target_scale
else:
return y
|
def transform(
self, y: Union[pd.Series, np.ndarray, torch.Tensor], return_norm: bool = False
) -> Union[
Tuple[Union[np.ndarray, torch.Tensor], np.ndarray], Union[np.ndarray, torch.Tensor]
]:
"""
Rescale data.
Args:
y (Union[pd.Series, np.ndarray, torch.Tensor]): input data
return_norm (bool, optional): [description]. Defaults to False.
Returns:
Union[Tuple[Union[np.ndarray, torch.Tensor], np.ndarray], Union[np.ndarray, torch.Tensor]]: rescaled
data with type depending on input type. returns second element if ``return_norm=True``
"""
if self.log_scale:
if isinstance(y, torch.Tensor):
y = (y + self.log_zero_value + self.eps).log()
else:
y = np.log(y + self.log_zero_value + self.eps)
if self.center:
y = (y / (self.center_ + self.eps) - 1) / (self.scale_ + self.eps)
else:
y = y / (self.center_ + self.eps)
if return_norm:
return y, self.get_parameters().numpy()[None, :]
else:
return y
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def __call__(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:
"""
Inverse transformation but with network output as input.
Args:
data (Dict[str, torch.Tensor]): Dictionary with entries
* prediction: data to de-scale
* target_scale: center and scale of data
scale_only (bool): if to only scale prediction and not center it (even if `self.center is True`).
Defaults to False.
Returns:
torch.Tensor: de-scaled data
"""
# inverse transformation with tensors
norm = data["target_scale"]
# use correct shape for norm
if data["prediction"].ndim > norm.ndim:
norm = norm.unsqueeze(-1)
# transform
y_normed = data["prediction"] * norm[:, 1, None] + norm[:, 0, None]
if self.log_scale:
y_normed = (y_normed.exp() - self.log_zero_value).clamp_min(0.0)
elif isinstance(self.coerce_positive, bool) and self.coerce_positive:
y_normed = y_normed.clamp_min(0.0)
elif isinstance(self.coerce_positive, float):
y_normed = F.softplus(y_normed, beta=float(self.coerce_positive))
# return correct shape
if data["prediction"].ndim == 1 and y_normed.ndim > 1:
y_normed = y_normed.squeeze(0)
return y_normed
|
def __call__(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:
"""
Inverse transformation but with network output as input.
Args:
data (Dict[str, torch.Tensor]): Dictionary with entries
* prediction: data to de-scale
* target_scale: center and scale of data
Returns:
torch.Tensor: de-scaled data
"""
# inverse transformation with tensors
norm = data["target_scale"]
# use correct shape for norm
if data["prediction"].ndim > norm.ndim:
norm = norm.unsqueeze(-1)
# transform
if self.center:
y_normed = (data["prediction"] * norm[:, 1, None] + 1) * norm[:, 0, None]
else:
y_normed = data["prediction"] * norm[:, 0, None]
if self.log_scale:
y_normed = (y_normed.exp() - self.log_zero_value).clamp_min(0.0)
elif isinstance(self.coerce_positive, bool) and self.coerce_positive:
y_normed = y_normed.clamp_min(0.0)
elif isinstance(self.coerce_positive, float):
y_normed = F.softplus(y_normed, beta=float(self.coerce_positive))
# return correct shape
if data["prediction"].ndim == 1 and y_normed.ndim > 1:
y_normed = y_normed.squeeze(0)
return y_normed
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def __init__(
self,
method: str = "standard",
groups: List[str] = [],
center: bool = True,
scale_by_group: bool = False,
log_scale: Union[bool, float] = False,
log_zero_value: float = 0.0,
coerce_positive: Union[float, bool] = None,
eps: float = 1e-8,
):
"""
Group normalizer to normalize a given entry by groups. Can be used as target normalizer.
Args:
method (str, optional): method to rescale series. Either "standard" (standard scaling) or "robust"
(scale using quantiles 0.25-0.75). Defaults to "standard".
groups (List[str], optional): Group names to normalize by. Defaults to [].
center (bool, optional): If to center the output to zero. Defaults to True.
scale_by_group (bool, optional): If to scale the output by group, i.e. norm is calculated as
``(group1_norm * group2_norm * ...) ^ (1 / n_groups)``. Defaults to False.
log_scale (bool, optional): If to take log of values. Defaults to False. Defaults to False.
log_zero_value (float, optional): Value to map 0 to for ``log_scale=True`` or in softplus. Defaults to 0.0
coerce_positive (Union[bool, float, str], optional): If to coerce output to positive. Valid values:
* None, i.e. is automatically determined and might change to True if all values are >= 0 (Default).
* True, i.e. output is clamped at 0.
* False, i.e. values are not coerced
* float, i.e. softmax is applied with beta = coerce_positive.
eps (float, optional): Number for numerical stability of calcualtions.
Defaults to 1e-8. For count data, 1.0 is recommended.
"""
self.groups = groups
self.scale_by_group = scale_by_group
super().__init__(
method=method,
center=center,
log_scale=log_scale,
log_zero_value=log_zero_value,
coerce_positive=coerce_positive,
eps=eps,
)
|
def __init__(
self,
method: str = "standard",
groups: List[str] = [],
center: bool = True,
scale_by_group: bool = False,
log_scale: Union[bool, float] = False,
log_zero_value: float = 0.0,
coerce_positive: Union[float, bool] = None,
eps: float = 1e-8,
):
"""
Group normalizer to normalize a given entry by groups. Can be used as target normalizer.
Args:
method (str, optional): method to rescale series. Either "standard" (standard scaling) or "robust"
(scale using quantiles 0.25-0.75). Defaults to "standard".
groups (List[str], optional): Group names to normalize by. Defaults to [].
center (bool, optional): If to center the output to zero. Defaults to True.
scale_by_group (bool, optional): If to scale the output by group, i.e. norm is calculated as
``(group1_norm * group2_norm * ...) ^ (1 / n_groups)``. Defaults to False.
log_scale (bool, optional): If to take log of values. Defaults to False. Defaults to False.
log_zero_value (float, optional): Value to map 0 to for ``log_scale=True`` or in softplus. Defaults to 0.0
coerce_positive (Union[bool, float, str], optional): If to coerce output to positive. Valid values:
* None, i.e. is automatically determined and might change to True if all values are >= 0 (Default).
* True, i.e. output is clamped at 0.
* False, i.e. values are not coerced
* float, i.e. softmax is applied with beta = coerce_positive.
eps (float, optional): Number for numerical stability of calcualtions. Defaults to 1e-8.
"""
self.groups = groups
self.scale_by_group = scale_by_group
super().__init__(
method=method,
center=center,
log_scale=log_scale,
log_zero_value=log_zero_value,
coerce_positive=coerce_positive,
eps=eps,
)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def fit(self, y: pd.Series, X: pd.DataFrame):
"""
Determine scales for each group
Args:
y (pd.Series): input data
X (pd.DataFrame): dataframe with columns for each group defined in ``groups`` parameter.
Returns:
self
"""
if self.coerce_positive is None and not self.log_scale:
self.coerce_positive = (y >= 0).all()
y = self._preprocess_y(y)
if len(self.groups) == 0:
assert not self.scale_by_group, (
"No groups are defined, i.e. `scale_by_group=[]`"
)
if self.method == "standard":
self.norm_ = [
np.mean(y) + self.eps,
np.std(y) + self.eps,
] # center and scale
else:
quantiles = np.quantile(y, [0.25, 0.5, 0.75])
self.norm_ = [
quantiles[1] + self.eps,
(quantiles[2] - quantiles[0]) / 2.0 + self.eps,
] # center and scale
if not self.center:
self.norm_[1] = self.norm_[0]
self.norm_[0] = 0.0
elif self.scale_by_group:
if self.method == "standard":
self.norm_ = {
g: X[[g]]
.assign(y=y)
.groupby(g, observed=True)
.agg(center=("y", "mean"), scale=("y", "std"))
.assign(
center=lambda x: x["center"] + self.eps,
scale=lambda x: x.scale + self.eps,
)
for g in self.groups
}
else:
self.norm_ = {
g: X[[g]]
.assign(y=y)
.groupby(g, observed=True)
.y.quantile([0.25, 0.5, 0.75])
.unstack(-1)
.assign(
center=lambda x: x[0.5] + self.eps,
scale=lambda x: (x[0.75] - x[0.25]) / 2.0 + self.eps,
)[["center", "scale"]]
for g in self.groups
}
# calculate missings
if not self.center: # swap center and scale
def swap_parameters(norm):
norm["scale"] = norm["center"]
norm["center"] = 0.0
return norm
self.norm = {g: swap_parameters(norm) for g, norm in self.norm_.items()}
self.missing_ = {
group: scales.median().to_dict() for group, scales in self.norm_.items()
}
else:
if self.method == "standard":
self.norm_ = (
X[self.groups]
.assign(y=y)
.groupby(self.groups, observed=True)
.agg(center=("y", "mean"), scale=("y", "std"))
.assign(
center=lambda x: x["center"] + self.eps,
scale=lambda x: x.scale + self.eps,
)
)
else:
self.norm_ = (
X[self.groups]
.assign(y=y)
.groupby(self.groups, observed=True)
.y.quantile([0.25, 0.5, 0.75])
.unstack(-1)
.assign(
center=lambda x: x[0.5] + self.eps,
scale=lambda x: (x[0.75] - x[0.25]) / 2.0 + self.eps,
)[["center", "scale"]]
)
if not self.center: # swap center and scale
self.norm_["scale"] = self.norm_["center"]
self.norm_["center"] = 0.0
self.missing_ = self.norm_.median().to_dict()
return self
|
def fit(self, y: pd.Series, X: pd.DataFrame):
"""
Determine scales for each group
Args:
y (pd.Series): input data
X (pd.DataFrame): dataframe with columns for each group defined in ``groups`` parameter.
Returns:
self
"""
y = self._preprocess_y(y)
if len(self.groups) == 0:
assert not self.scale_by_group, (
"No groups are defined, i.e. `scale_by_group=[]`"
)
if self.method == "standard":
mean = np.mean(y)
self.norm_ = mean, np.std(y) / (mean + self.eps)
else:
quantiles = np.quantile(y, [0.25, 0.5, 0.75])
self.norm_ = (
quantiles[1],
(quantiles[2] - quantiles[0]) / (quantiles[1] + self.eps),
)
elif self.scale_by_group:
if self.method == "standard":
self.norm_ = {
g: X[[g]]
.assign(y=y)
.groupby(g, observed=True)
.agg(mean=("y", "mean"), scale=("y", "std"))
.assign(scale=lambda x: x.scale / (x["mean"] + self.eps))
for g in self.groups
}
else:
self.norm_ = {
g: X[[g]]
.assign(y=y)
.groupby(g, observed=True)
.y.quantile([0.25, 0.5, 0.75])
.unstack(-1)
.assign(
median=lambda x: x[0.5] + self.eps,
scale=lambda x: (x[0.75] - x[0.25] + self.eps)
/ (x[0.5] + self.eps),
)[["median", "scale"]]
for g in self.groups
}
# calculate missings
self.missing_ = {
group: scales.median().to_dict() for group, scales in self.norm_.items()
}
else:
if self.method == "standard":
self.norm_ = (
X[self.groups]
.assign(y=y)
.groupby(self.groups, observed=True)
.agg(mean=("y", "mean"), scale=("y", "std"))
.assign(scale=lambda x: x.scale / (x["mean"] + self.eps))
)
else:
self.norm_ = (
X[self.groups]
.assign(y=y)
.groupby(self.groups, observed=True)
.y.quantile([0.25, 0.5, 0.75])
.unstack(-1)
.assign(
median=lambda x: x[0.5] + self.eps,
scale=lambda x: (x[0.75] - x[0.25] + self.eps)
/ (x[0.5] + self.eps)
/ 2.0,
)[["median", "scale"]]
)
self.missing_ = self.norm_.median().to_dict()
return self
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def names(self) -> List[str]:
"""
Names of determined scales.
Returns:
List[str]: list of names
"""
return ["center", "scale"]
|
def names(self) -> List[str]:
"""
Names of determined scales.
Returns:
List[str]: list of names
"""
if self.method == "standard":
return ["mean", "scale"]
else:
return ["median", "scale"]
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def inverse_transform(self, y: pd.Series, X: pd.DataFrame):
"""
Rescaling data to original scale - not implemented - call class with target scale instead.
"""
raise NotImplementedError()
|
def inverse_transform(self, y: pd.Series, X: pd.DataFrame):
"""
Rescaling data to original scale - not implemented.
"""
raise NotImplementedError()
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def transform(
self,
y: pd.Series,
X: pd.DataFrame = None,
return_norm: bool = False,
target_scale: torch.Tensor = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Scale input data.
Args:
y (pd.Series): data to scale
X (pd.DataFrame): dataframe with ``groups`` columns
return_norm (bool, optional): If to return . Defaults to False.
target_scale (torch.Tensor): target scale to use instead of fitted center and scale
Returns:
Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: Scaled data, if ``return_norm=True``, returns also scales
as second element
"""
if target_scale is None:
assert X is not None, "either target_scale or X has to be passed"
target_scale = self.get_norm(X)
return super().transform(y=y, return_norm=return_norm, target_scale=target_scale)
|
def transform(
    self, y: pd.Series, X: pd.DataFrame, return_norm: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """
    Scale input data.

    Args:
        y (pd.Series): data to scale
        X (pd.DataFrame): dataframe with ``groups`` columns
        return_norm (bool, optional): if to return the scales as a second element. Defaults to False.

    Returns:
        Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: scaled data; if ``return_norm=True``, the scales
            are returned as a second element
    """
    # look up the per-group center/scale and bring the target into numeric form
    scales = self.get_norm(X)
    values = self._preprocess_y(y)
    # eps guards against division by zero for (near-)constant groups
    scaled = values / (scales[:, 0] + self.eps)
    if self.center:
        # column 0 is the center, column 1 the scale of the centered values
        scaled = (scaled - 1) / (scales[:, 1] + self.eps)
    return (scaled, scales) if return_norm else scaled
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def _preprocess_data(self, data: pd.DataFrame) -> pd.DataFrame:
    """
    Scale continuous variables, encode categories and set aside target and weight.

    Args:
        data (pd.DataFrame): original data

    Returns:
        pd.DataFrame: pre-processed dataframe
    """
    # encode categoricals
    # encoders are handled lazily: a missing encoder is instantiated and fitted here,
    # an unfitted user-supplied encoder is fitted here, and an encoder explicitly set
    # to None is skipped (the variable is then left unencoded)
    for name in set(self.categoricals + self.group_ids):
        allow_nans = name in self.dropout_categoricals
        if name in self.variable_groups: # fit groups
            # a variable group shares one encoder over all of its member columns
            columns = self.variable_groups[name]
            if name not in self.categorical_encoders:
                self.categorical_encoders[name] = NaNLabelEncoder(
                    add_nan=allow_nans
                ).fit(data[columns].to_numpy().reshape(-1))
            elif self.categorical_encoders[name] is not None:
                try:
                    check_is_fitted(self.categorical_encoders[name])
                except NotFittedError:
                    self.categorical_encoders[name] = self.categorical_encoders[
                        name
                    ].fit(data[columns].to_numpy().reshape(-1))
        else:
            if name not in self.categorical_encoders:
                self.categorical_encoders[name] = NaNLabelEncoder(
                    add_nan=allow_nans
                ).fit(data[name])
            elif self.categorical_encoders[name] is not None:
                try:
                    check_is_fitted(self.categorical_encoders[name])
                except NotFittedError:
                    self.categorical_encoders[name] = self.categorical_encoders[
                        name
                    ].fit(data[name])
    # encode them
    for name in set(self.flat_categoricals + self.group_ids):
        data[name] = self.transform_values(name, data[name], inverse=False)
    # save special variables
    # keep unscaled copies of the time index and target under protected column names
    assert "__time_idx__" not in data.columns, (
        "__time_idx__ is a protected column and must not be present in data"
    )
    data["__time_idx__"] = data[self.time_idx] # save unscaled
    assert "__target__" not in data.columns, (
        "__target__ is a protected column and must not be present in data"
    )
    data["__target__"] = data[self.target]
    if self.weight is not None:
        data["__weight__"] = data[self.weight]
    # train target normalizer
    if self.target_normalizer is not None:
        # fit target normalizer
        try:
            check_is_fitted(self.target_normalizer)
        except NotFittedError:
            if isinstance(self.target_normalizer, EncoderNormalizer):
                self.target_normalizer.fit(data[self.target])
            elif isinstance(self.target_normalizer, GroupNormalizer):
                self.target_normalizer.fit(data[self.target], data)
            else:
                self.target_normalizer.fit(data[self.target])
        # transform target
        if isinstance(self.target_normalizer, EncoderNormalizer):
            # we approximate the scales and target transformation by assuming one
            # transformation over the entire time range but by each group
            # copy only the hyperparameters shared by both __init__ signatures,
            # excluding the non-hyperparameter names "data" and "self"
            common_init_args = [
                name
                for name in inspect.signature(
                    GroupNormalizer.__init__
                ).parameters.keys()
                if name
                in inspect.signature(EncoderNormalizer.__init__).parameters.keys()
                and name not in ["data", "self"]
            ]
            copy_kwargs = {
                name: getattr(self.target_normalizer, name) for name in common_init_args
            }
            normalizer = GroupNormalizer(groups=self.group_ids, **copy_kwargs)
            data[self.target], scales = normalizer.fit_transform(
                data[self.target], data, return_norm=True
            )
        elif isinstance(self.target_normalizer, GroupNormalizer):
            data[self.target], scales = self.target_normalizer.transform(
                data[self.target], data, return_norm=True
            )
        elif isinstance(self.target_normalizer, NaNLabelEncoder):
            # categorical target: label-encode only, no scales are produced
            data[self.target] = self.target_normalizer.transform(data[self.target])
        else:
            data[self.target], scales = self.target_normalizer.transform(
                data[self.target], return_norm=True
            )
        # add target scales
        if self.add_target_scales:
            for idx, name in enumerate(["center", "scale"]):
                feature_name = f"{self.target}_{name}"
                assert feature_name not in data.columns, (
                    f"{feature_name} is a protected column and must not be present in data"
                )
                data[feature_name] = scales[:, idx].squeeze()
                if feature_name not in self.reals:
                    self.static_reals.append(feature_name)
    if self.target in self.reals:
        self.scalers[self.target] = self.target_normalizer
    # rescale continuous variables apart from target
    for name in self.reals:
        if name not in self.scalers:
            self.scalers[name] = StandardScaler().fit(data[[name]])
        elif self.scalers[name] is not None:
            try:
                check_is_fitted(self.scalers[name])
            except NotFittedError:
                if isinstance(self.scalers[name], GroupNormalizer):
                    self.scalers[name] = self.scalers[name].fit(data[[name]], data)
                else:
                    self.scalers[name] = self.scalers[name].fit(data[[name]])
        if self.scalers[name] is not None and name != self.target:
            data[name] = self.transform_values(
                name, data[name], data=data, inverse=False
            )
    # encode constant values
    # fill values are translated into encoded/scaled space so they can be applied
    # directly to the pre-processed columns
    self.encoded_constant_fill_strategy = {}
    for name, value in self.constant_fill_strategy.items():
        if name == self.target:
            self.encoded_constant_fill_strategy["__target__"] = value
        self.encoded_constant_fill_strategy[name] = self.transform_values(
            name, np.array([value]), data=data, inverse=False
        )[0]
    return data
|
def _preprocess_data(self, data: pd.DataFrame) -> pd.DataFrame:
    """
    Scale continuous variables, encode categories and set aside target and weight.

    Args:
        data (pd.DataFrame): original data

    Returns:
        pd.DataFrame: pre-processed dataframe
    """
    # encode categoricals
    # encoders are handled lazily: a missing encoder is instantiated and fitted here,
    # an unfitted user-supplied encoder is fitted here, and an encoder explicitly set
    # to None is skipped (the variable is then left unencoded)
    for name in set(self.categoricals + self.group_ids):
        allow_nans = name in self.dropout_categoricals
        if name in self.variable_groups: # fit groups
            # a variable group shares one encoder over all of its member columns
            columns = self.variable_groups[name]
            if name not in self.categorical_encoders:
                self.categorical_encoders[name] = NaNLabelEncoder(
                    add_nan=allow_nans
                ).fit(data[columns].to_numpy().reshape(-1))
            elif self.categorical_encoders[name] is not None:
                try:
                    check_is_fitted(self.categorical_encoders[name])
                except NotFittedError:
                    self.categorical_encoders[name] = self.categorical_encoders[
                        name
                    ].fit(data[columns].to_numpy().reshape(-1))
        else:
            if name not in self.categorical_encoders:
                self.categorical_encoders[name] = NaNLabelEncoder(
                    add_nan=allow_nans
                ).fit(data[name])
            elif self.categorical_encoders[name] is not None:
                try:
                    check_is_fitted(self.categorical_encoders[name])
                except NotFittedError:
                    self.categorical_encoders[name] = self.categorical_encoders[
                        name
                    ].fit(data[name])
    # encode them
    for name in set(self.flat_categoricals + self.group_ids):
        data[name] = self.transform_values(name, data[name], inverse=False)
    # save special variables
    # keep unscaled copies of the time index and target under protected column names
    assert "__time_idx__" not in data.columns, (
        "__time_idx__ is a protected column and must not be present in data"
    )
    data["__time_idx__"] = data[self.time_idx] # save unscaled
    assert "__target__" not in data.columns, (
        "__target__ is a protected column and must not be present in data"
    )
    data["__target__"] = data[self.target]
    if self.weight is not None:
        data["__weight__"] = data[self.weight]
    # train target normalizer
    if self.target_normalizer is not None:
        # fit target normalizer
        try:
            check_is_fitted(self.target_normalizer)
        except NotFittedError:
            if isinstance(self.target_normalizer, EncoderNormalizer):
                self.target_normalizer.fit(data[self.target])
            elif isinstance(self.target_normalizer, GroupNormalizer):
                self.target_normalizer.fit(data[self.target], data)
            else:
                self.target_normalizer.fit(data[self.target])
        # transform target
        if isinstance(self.target_normalizer, EncoderNormalizer):
            # we approximate the scales and target transformation by assuming one
            # transformation over the entire time range but by each group
            # FIX: inspect the __init__ signatures (not the class call signatures)
            # and exclude the non-hyperparameter names "data" and "self" — otherwise
            # shared non-hyperparameter arguments end up in copy_kwargs and
            # getattr(self.target_normalizer, name) fails
            common_init_args = [
                name
                for name in inspect.signature(
                    GroupNormalizer.__init__
                ).parameters.keys()
                if name
                in inspect.signature(EncoderNormalizer.__init__).parameters.keys()
                and name not in ["data", "self"]
            ]
            copy_kwargs = {
                name: getattr(self.target_normalizer, name) for name in common_init_args
            }
            normalizer = GroupNormalizer(groups=self.group_ids, **copy_kwargs)
            data[self.target], scales = normalizer.fit_transform(
                data[self.target], data, return_norm=True
            )
        elif isinstance(self.target_normalizer, GroupNormalizer):
            data[self.target], scales = self.target_normalizer.transform(
                data[self.target], data, return_norm=True
            )
        elif isinstance(self.target_normalizer, NaNLabelEncoder):
            # categorical target: label-encode only, no scales are produced
            data[self.target] = self.target_normalizer.transform(data[self.target])
        else:
            data[self.target], scales = self.target_normalizer.transform(
                data[self.target], return_norm=True
            )
        # add target scales
        if self.add_target_scales:
            for idx, name in enumerate(["center", "scale"]):
                feature_name = f"{self.target}_{name}"
                assert feature_name not in data.columns, (
                    f"{feature_name} is a protected column and must not be present in data"
                )
                data[feature_name] = scales[:, idx].squeeze()
                if feature_name not in self.reals:
                    self.static_reals.append(feature_name)
    if self.target in self.reals:
        self.scalers[self.target] = self.target_normalizer
    # rescale continuous variables apart from target
    for name in self.reals:
        if name not in self.scalers:
            self.scalers[name] = StandardScaler().fit(data[[name]])
        elif self.scalers[name] is not None:
            try:
                check_is_fitted(self.scalers[name])
            except NotFittedError:
                if isinstance(self.scalers[name], GroupNormalizer):
                    self.scalers[name] = self.scalers[name].fit(data[[name]], data)
                else:
                    self.scalers[name] = self.scalers[name].fit(data[[name]])
        if self.scalers[name] is not None and name != self.target:
            data[name] = self.transform_values(
                name, data[name], data=data, inverse=False
            )
    # encode constant values
    # fill values are translated into encoded/scaled space so they can be applied
    # directly to the pre-processed columns
    self.encoded_constant_fill_strategy = {}
    for name, value in self.constant_fill_strategy.items():
        if name == self.target:
            self.encoded_constant_fill_strategy["__target__"] = value
        self.encoded_constant_fill_strategy[name] = self.transform_values(
            name, np.array([value]), data=data, inverse=False
        )[0]
    return data
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def _construct_index(self, data: pd.DataFrame, predict_mode: bool) -> pd.DataFrame:
    """
    Create index of samples.

    Args:
        data (pd.DataFrame): preprocessed data
        predict_mode (bool): if to create one same per group with prediction length equals ``max_decoder_length``

    Returns:
        pd.DataFrame: index dataframe
    """
    g = data.groupby(self.group_ids, observed=True)
    # per-row bookkeeping: first/last time step of the row's group and the gap to the next row
    df_index_first = g["__time_idx__"].transform("nth", 0).to_frame("time_first")
    df_index_last = g["__time_idx__"].transform("nth", -1).to_frame("time_last")
    df_index_diff_to_next = (
        -g["__time_idx__"].diff(-1).fillna(-1).astype(int).to_frame("time_diff_to_next")
    )
    df_index = pd.concat([df_index_first, df_index_last, df_index_diff_to_next], axis=1)
    df_index["index_start"] = np.arange(len(df_index))
    df_index["time"] = data["__time_idx__"]
    df_index["count"] = (df_index["time_last"] - df_index["time_first"]).astype(int) + 1
    group_ids = g.ngroup()
    df_index["group_id"] = group_ids
    min_sequence_length = self.min_prediction_length + self.min_encoder_length
    max_sequence_length = self.max_prediction_length + self.max_encoder_length
    # calculate maximum index to include from current index_start
    max_time = (df_index["time"] + max_sequence_length - 1).clip(
        upper=df_index["count"] + df_index.time_first - 1
    )
    # if there are missing timesteps, we cannot say directly what is the last timestep to include
    # therefore we iterate until it is found
    if (df_index["time_diff_to_next"] != 1).any():
        assert self.allow_missings, (
            "Time difference between steps has been identified as larger than 1 - set allow_missings=True"
        )
    df_index["index_end"], missing_sequences = _find_end_indices(
        diffs=df_index.time_diff_to_next.to_numpy(),
        max_lengths=(max_time - df_index.time).to_numpy() + 1,
        min_length=min_sequence_length,
    )
    # add duplicates but mostly with shorter sequence length for start of timeseries
    # while the previous steps have ensured that we start a sequence on every time step, the missing_sequences
    # ensure that there is a sequence that finishes on every timestep
    if len(missing_sequences) > 0:
        shortened_sequences = df_index.iloc[missing_sequences[:, 0]].assign(
            index_end=missing_sequences[:, 1]
        )
        # concatenate shortened sequences
        df_index = pd.concat([df_index, shortened_sequences], axis=0, ignore_index=True)
    # filter out where encode and decode length are not satisfied
    df_index["sequence_length"] = (
        df_index["time"].iloc[df_index["index_end"]].to_numpy() - df_index["time"] + 1
    )
    # filter too short sequences
    df_index = df_index[
        # sequence must be at least of minimal prediction length
        lambda x: (x.sequence_length >= min_sequence_length)
        &
        # prediction must be for after minimal prediction index + length of prediction
        (
            x["sequence_length"] + x["time"]
            >= self.min_prediction_idx + self.min_prediction_length
        )
    ]
    if predict_mode: # keep longest element per series (i.e. the first element that spans to the end of the series)
        # filter all elements that are longer than the allowed maximum sequence length
        df_index = df_index[
            lambda x: (x["time_last"] - x["time"] + 1 <= max_sequence_length)
            & (x["sequence_length"] >= min_sequence_length)
        ]
        # choose longest sequence
        df_index = df_index.loc[df_index.groupby("group_id").sequence_length.idxmax()]
    # check that all groups/series have at least one entry in the index
    if not group_ids.isin(df_index.group_id).all():
        missing_groups = data.loc[
            ~group_ids.isin(df_index.group_id), self.group_ids
        ].drop_duplicates()
        # decode values
        for name in missing_groups.columns:
            missing_groups[name] = self.transform_values(
                name, missing_groups[name], inverse=True
            )
        # fix: the message previously was a (str, str) tuple — a comma instead of
        # implicit string concatenation — so warnings.warn rendered the tuple repr;
        # join everything into a single string
        warnings.warn(
            "Min encoder length and/or min_prediction_idx and/or min prediction length is too large for "
            f"{len(missing_groups)} series/groups which therefore are not present in the dataset index. "
            "This means no predictions can be made for those series. "
            f"First 10 removed groups: {list(missing_groups.iloc[:10].to_dict(orient='index').values())}",
            UserWarning,
        )
    assert len(df_index) > 0, "filters should not remove entries"
    return df_index
|
def _construct_index(self, data: pd.DataFrame, predict_mode: bool) -> pd.DataFrame:
    """
    Create index of samples.

    Args:
        data (pd.DataFrame): preprocessed data
        predict_mode (bool): if to create one same per group with prediction length equals ``max_decoder_length``

    Returns:
        pd.DataFrame: index dataframe
    """
    g = data.groupby(self.group_ids, observed=True)
    # per-row bookkeeping: first/last time step of the row's group and the gap to the next row
    df_index_first = g["__time_idx__"].transform("nth", 0).to_frame("time_first")
    df_index_last = g["__time_idx__"].transform("nth", -1).to_frame("time_last")
    df_index_diff_to_next = (
        -g["__time_idx__"].diff(-1).fillna(-1).astype(int).to_frame("time_diff_to_next")
    )
    df_index = pd.concat([df_index_first, df_index_last, df_index_diff_to_next], axis=1)
    df_index["index_start"] = np.arange(len(df_index))
    df_index["time"] = data["__time_idx__"]
    df_index["count"] = (df_index["time_last"] - df_index["time_first"]).astype(int) + 1
    group_ids = g.ngroup()
    df_index["group_id"] = group_ids
    min_sequence_length = self.min_prediction_length + self.min_encoder_length
    max_sequence_length = self.max_prediction_length + self.max_encoder_length
    # calculate maximum index to include from current index_start
    max_time = (df_index["time"] + max_sequence_length - 1).clip(
        upper=df_index["count"] + df_index.time_first - 1
    )
    # if there are missing timesteps, we cannot say directly what is the last timestep to include
    # therefore we iterate until it is found
    if (df_index["time_diff_to_next"] != 1).any():
        assert self.allow_missings, (
            "Time difference between steps has been idenfied as larger than 1 - set allow_missings=True"
        )
    df_index["index_end"], missing_sequences = _find_end_indices(
        diffs=df_index.time_diff_to_next.to_numpy(),
        max_lengths=(max_time - df_index.time).to_numpy() + 1,
        min_length=min_sequence_length,
    )
    # add duplicates but mostly with shorter sequence length for start of timeseries
    # while the previous steps have ensured that we start a sequence on every time step, the missing_sequences
    # ensure that there is a sequence that finishes on every timestep
    if len(missing_sequences) > 0:
        shortened_sequences = df_index.iloc[missing_sequences[:, 0]].assign(
            index_end=missing_sequences[:, 1]
        )
        # concatenate shortened sequences
        df_index = pd.concat([df_index, shortened_sequences], axis=0, ignore_index=True)
    # filter out where encode and decode length are not satisfied
    df_index["sequence_length"] = (
        df_index["time"].iloc[df_index["index_end"]].to_numpy() - df_index["time"] + 1
    )
    # filter too short sequences
    df_index = df_index[
        # sequence must be at least of minimal prediction length
        lambda x: (x.sequence_length >= min_sequence_length)
        &
        # prediction must be for after minimal prediction index + length of prediction
        (
            x["sequence_length"] + x["time"]
            >= self.min_prediction_idx + self.min_prediction_length
        )
    ]
    if predict_mode: # keep longest element per series (i.e. the first element that spans to the end of the series)
        # filter all elements that are longer than the allowed maximum sequence length
        df_index = df_index[
            lambda x: (x["time_last"] - x["time"] + 1 <= max_sequence_length)
            & (x["sequence_length"] >= min_sequence_length)
        ]
        # choose longest sequence
        df_index = df_index.loc[df_index.groupby("group_id").sequence_length.idxmax()]
    assert len(df_index) > 0, "filters should not remove entries"
    # check that all groups/series have at least one entry in the index
    # NOTE(review): this raises instead of warning — any group filtered out entirely
    # aborts dataset construction
    if not group_ids.isin(df_index.group_id).all():
        missing_groups = data.loc[
            ~group_ids.isin(df_index.group_id), self.group_ids
        ].drop_duplicates()
        # decode values
        for name in missing_groups.columns:
            missing_groups[name] = self.transform_values(
                name, missing_groups[name], inverse=True
            )
        raise ValueError(
            f"Min encoder length and/or min prediction length is too large for {len(missing_groups)} series/group. "
            f"First 10 examples: {list(missing_groups.iloc[:10].to_dict(orient='index').values())}"
        )
    return df_index
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def x_to_index(self, x: Dict[str, torch.Tensor]) -> pd.DataFrame:
    """
    Decode dataframe index from x.

    Returns:
        dataframe with time index column for first prediction and group ids
    """
    # tensors are moved to CPU because pandas cannot build columns from CUDA tensors
    columns = {self.time_idx: x["decoder_time_idx"][:, 0].cpu()}
    for group_name in self.group_ids:
        raw_values = x["groups"][:, self.group_ids.index(group_name)].cpu()
        # decode if possible
        columns[group_name] = self.transform_values(group_name, raw_values, inverse=True)
    return pd.DataFrame(columns)
|
def x_to_index(self, x: Dict[str, torch.Tensor]) -> pd.DataFrame:
    """
    Decode dataframe index from x.

    Args:
        x: network input dict containing "decoder_time_idx" (batch x time) and
            "groups" (batch x n_groups) tensors - TODO confirm schema against caller

    Returns:
        dataframe with time index column for first prediction and group ids
    """
    # move tensors to CPU first: pandas cannot construct columns from CUDA tensors,
    # so on GPU training this would otherwise raise a TypeError
    index_data = {self.time_idx: x["decoder_time_idx"][:, 0].cpu()}
    for group_id in self.group_ids:  # renamed from `id` to avoid shadowing the builtin
        index_data[group_id] = x["groups"][:, self.group_ids.index(group_id)].cpu()
        # decode if possible
        index_data[group_id] = self.transform_values(group_id, index_data[group_id], inverse=True)
    index = pd.DataFrame(index_data)
    return index
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def __init__(
    self,
    name: str = None,
    quantiles: List[float] = None,
    reduction="mean",
):
    """
    Initialize metric

    Args:
        name (str): metric name. Defaults to class name.
        quantiles (List[float], optional): quantiles for probability range.
            Defaults to [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98].
        reduction (str, optional): Reduction, "none", "mean" or "sqrt-mean". Defaults to "mean".
    """
    # avoid a mutable default argument (shared list across calls);
    # resolve the default quantile grid here instead
    if quantiles is None:
        quantiles = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
    super().__init__(name=name, quantiles=quantiles, reduction=reduction)
|
def __init__(self, reduction: str = "sqrt-mean", **kwargs):
    """
    Initialize metric with "sqrt-mean" as the default reduction.

    Args:
        reduction (str, optional): reduction passed to the parent metric.
            Defaults to "sqrt-mean".
        **kwargs: forwarded unchanged to the parent metric's ``__init__``
            (presumably ``name``/``quantiles`` - see parent class).
    """
    super().__init__(reduction=reduction, **kwargs)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def to_prediction(self, y_pred: torch.Tensor) -> torch.Tensor:
    """
    Convert network prediction into a point prediction.

    Args:
        y_pred: prediction output of network (with ``output_type = samples``)

    Returns:
        torch.Tensor: mean prediction
    """
    # collapse the trailing sample dimension into its average
    return torch.mean(y_pred, dim=-1)
|
def to_prediction(self, out: Dict[str, torch.Tensor]):
    """
    Convert network output into a Poisson rate point prediction.

    The parent's point prediction is interpreted as a log-rate, so it is
    exponentiated here to obtain the (positive) rate.

    Args:
        out: network output dict as accepted by the parent's ``to_prediction``

    Returns:
        torch.Tensor: predicted rate
    """
    rate = torch.exp(super().to_prediction(out))
    return rate
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def to_quantiles(self, y_pred: torch.Tensor) -> torch.Tensor:
    """
    Convert network prediction into a quantile prediction.

    Args:
        y_pred: prediction output of network (with ``output_type = samples``)

    Returns:
        torch.Tensor: prediction quantiles (last dimension)
    """
    samples = y_pred.size(-1)
    quantile_tensors = []
    for q in self.quantiles:
        if samples > 1:
            # clamp k into torch.kthvalue's valid range [1, samples]:
            # int(samples * q) is 0 for small quantiles (e.g. q=0.02 with
            # fewer than 50 samples), which kthvalue rejects with an error
            k = min(samples, max(1, int(samples * q)))
            quantile_tensors.append(torch.kthvalue(y_pred, k, dim=-1)[0])
        else:
            # a single sample has no spread - use it for every quantile
            quantile_tensors.append(y_pred[..., 0])
    return torch.stack(quantile_tensors, dim=-1)
|
def to_quantiles(self, out: Dict[str, torch.Tensor], quantiles=None):
    """
    Convert network output into Poisson quantile predictions.

    Uses the parent's point prediction as the Poisson rate and evaluates
    scipy's inverse CDF (``ppf``) at each requested quantile.

    Args:
        out: network output dict as accepted by the parent's ``to_prediction``
        quantiles: quantiles to compute; falls back to ``self.quantiles``
            and finally to the median ``[0.5]``

    Returns:
        torch.Tensor: quantile predictions stacked along the last dimension
    """
    if quantiles is None:
        if self.quantiles is None:
            quantiles = [0.5]
        else:
            quantiles = self.quantiles
    predictions = super().to_prediction(out)
    # NOTE(review): torch.tensor(...) materializes the scipy result on CPU with
    # scipy's dtype even if `predictions` lives on GPU - verify callers expect that
    return torch.stack(
        [torch.tensor(scipy.stats.poisson(predictions).ppf(q)) for q in quantiles],
        dim=-1,
    )
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def loss(self, y_pred: torch.Tensor, y_actual: torch.Tensor) -> torch.Tensor:
    """
    Calculate negative likelihood

    Args:
        y_pred: network output
        y_actual: actual values

    Returns:
        torch.Tensor: metric value on which backpropagation can be applied
    """
    # map network output to a distribution, then score the observations
    dist = self.map_x_to_distribution(y_pred)
    return -dist.log_prob(y_actual)
|
def loss(self, y_pred, target, scaling):
    """
    Compute elementwise absolute error scaled per sample.

    Args:
        y_pred: predictions
        target: actual values
        scaling: per-sample scale factor, broadcast over the last dimension

    Returns:
        torch.Tensor: scaled absolute errors
    """
    absolute_error = torch.abs(y_pred - target)
    # unsqueeze so the scale broadcasts across the time/quantile axis
    return absolute_error / scaling.unsqueeze(-1)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def step(
    self,
    x: Dict[str, torch.Tensor],
    y: torch.Tensor,
    batch_idx: int,
    label="train",
    **kwargs,
):
    """
    Run for each train/val step.

    Computes the network output and the loss. During training, if monotone
    constraints are configured, an additional penalty based on the gradient of
    the prediction w.r.t. continuous decoder features is multiplied into the
    loss. Metrics and (periodically) predictions are logged.

    Args:
        x: network input dict; this method reads "decoder_lengths",
            "decoder_cont", "encoder_target" and "encoder_lengths"
            (full schema defined by the dataloader - TODO confirm)
        y: target tensor, padded to the maximum decoder length in the batch
        batch_idx: index of the current batch
        label: "train" or "val"; gates the monotonicity penalty and logging
        **kwargs: forwarded to the network forward pass ``self(x, **kwargs)``

    Returns:
        tuple of (log dict with "loss" and "n_samples", network output dict)
    """
    # pack y sequence if different encoder lengths exist
    if (x["decoder_lengths"] < x["decoder_lengths"].max()).any():
        y = rnn.pack_padded_sequence(
            y, lengths=x["decoder_lengths"], batch_first=True, enforce_sorted=False
        )
    if label == "train" and len(self.hparams.monotone_constaints) > 0:
        # calculate gradient with respect to continuous decoder features
        x["decoder_cont"].requires_grad_(True)
        # cudnn does not support double backward through RNNs, which the
        # gradient penalty below requires
        assert not torch._C._get_cudnn_enabled(), (
            "To use monotone constraints, wrap model and training in context "
            "`torch.backends.cudnn.flags(enable=False)`"
        )
        out = self(x, **kwargs)
        out["prediction"] = self.transform_output(out)
        prediction = out["prediction"]
        gradient = torch.autograd.grad(
            outputs=prediction,
            inputs=x["decoder_cont"],
            grad_outputs=torch.ones_like(prediction),  # seed of ones sums gradients over all outputs
            create_graph=True,  # allows usage in graph
            allow_unused=True,
        )[0]
        # select relevant features (columns of decoder_cont with a constraint)
        indices = torch.tensor(
            [
                self.hparams.x_reals.index(name)
                for name in self.hparams.monotone_constaints.keys()
            ]
        )
        # +1/-1 per constrained feature: the desired sign of the gradient
        monotonicity = torch.tensor(
            [val for val in self.hparams.monotone_constaints.values()],
            dtype=gradient.dtype,
            device=gradient.device,
        )
        # add additional loss if gradient points in wrong direction
        gradient = gradient[..., indices] * monotonicity[None, None]
        monotinicity_loss = gradient.clamp_max(0).mean()
        # multiply monotinicity loss by large number to ensure relevance and take to the power of 2
        # for smoothness of loss function
        monotinicity_loss = 10 * torch.pow(monotinicity_loss, 2)
        # MASE needs the encoder history for its scaling term
        if isinstance(self.loss, MASE):
            loss = self.loss(
                prediction,
                y,
                encoder_target=x["encoder_target"],
                encoder_lengths=x["encoder_lengths"],
            )
        else:
            loss = self.loss(prediction, y)
        # fold the penalty in multiplicatively so it scales with the base loss
        loss = loss * (1 + monotinicity_loss)
    else:
        # plain forward pass without gradient penalty
        out = self(x, **kwargs)
        out["prediction"] = self.transform_output(out)
        # calculate loss
        prediction = out["prediction"]
        if isinstance(self.loss, MASE):
            loss = self.loss(
                prediction,
                y,
                encoder_target=x["encoder_target"],
                encoder_lengths=x["encoder_lengths"],
            )
        else:
            loss = self.loss(prediction, y)
    # log
    self._log_metrics(x, y, out, label=label)
    if self.log_interval(label == "train") > 0:
        self._log_prediction(x, out, batch_idx, label=label)
    log = {"loss": loss, "n_samples": x["decoder_lengths"].size(0)}
    return log, out
|
def step(
    self, x: Dict[str, torch.Tensor], y: torch.Tensor, batch_idx: int, label="train"
):
    """
    Run for each train/val step.

    Args:
        x: network input dictionary; expected keys include "decoder_lengths",
            "decoder_cont", "encoder_target" and "encoder_lengths"
        y: target tensor (padded to the maximum decoder length in the batch)
        batch_idx: index of the batch within the epoch
        label: "train" or "val" - the monotonicity penalty is only applied during training

    Returns:
        tuple of log dictionary (``{"loss": ..., "n_samples": ...}``) and network output
    """
    # pack y sequence if different encoder lengths exist
    if (x["decoder_lengths"] < x["decoder_lengths"].max()).any():
        # pack_padded_sequence requires ``lengths`` to be a 1D CPU int64 tensor -
        # passing a CUDA tensor raises
        # "RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor",
        # so move the lengths to CPU explicitly
        y = rnn.pack_padded_sequence(
            y,
            lengths=x["decoder_lengths"].cpu(),
            batch_first=True,
            enforce_sorted=False,
        )
    if label == "train" and len(self.hparams.monotone_constaints) > 0:
        # calculate gradient with respect to continous decoder features
        x["decoder_cont"].requires_grad_(True)
        assert not torch._C._get_cudnn_enabled(), (
            "To use monotone constraints, wrap model and training in context "
            "`torch.backends.cudnn.flags(enable=False)`"
        )
        out = self(x)
        out["prediction"] = self.transform_output(out)
        prediction = out["prediction"]
        # gradient of summed predictions w.r.t. the continuous decoder inputs
        gradient = torch.autograd.grad(
            outputs=prediction,
            inputs=x["decoder_cont"],
            grad_outputs=torch.ones_like(prediction),
            create_graph=True,  # allows usage in graph
            allow_unused=True,
        )[0]
        # select features that carry a monotonicity constraint
        indices = torch.tensor(
            [
                self.hparams.x_reals.index(name)
                for name in self.hparams.monotone_constaints.keys()
            ]
        )
        monotonicity = torch.tensor(
            [val for val in self.hparams.monotone_constaints.values()],
            dtype=gradient.dtype,
            device=gradient.device,
        )
        # add additional loss if gradient points in wrong direction
        gradient = gradient[..., indices] * monotonicity[None, None]
        monotinicity_loss = gradient.clamp_max(0).mean()
        # multiply monotonicity loss by large number to ensure relevance and take to the power of 2
        # for smoothness of loss function
        monotinicity_loss = 10 * torch.pow(monotinicity_loss, 2)
        if isinstance(self.loss, MASE):
            # MASE needs the encoder history to scale the error
            loss = self.loss(
                prediction,
                y,
                encoder_target=x["encoder_target"],
                encoder_lengths=x["encoder_lengths"],
            )
        else:
            loss = self.loss(prediction, y)
        loss = loss * (1 + monotinicity_loss)
    else:
        out = self(x)
        out["prediction"] = self.transform_output(out)
        # calculate loss
        prediction = out["prediction"]
        if isinstance(self.loss, MASE):
            loss = self.loss(
                prediction,
                y,
                encoder_target=x["encoder_target"],
                encoder_lengths=x["encoder_lengths"],
            )
        else:
            loss = self.loss(prediction, y)
    # logging losses - detach predictions so metric computation does not keep the graph alive
    y_hat_detached = prediction.detach()
    y_hat_point_detached = self.loss.to_prediction(y_hat_detached)
    for metric in self.logging_metrics:
        if isinstance(metric, MASE):
            loss_value = metric(
                y_hat_point_detached,
                y,
                encoder_target=x["encoder_target"],
                encoder_lengths=x["encoder_lengths"],
            )
        else:
            loss_value = metric(y_hat_point_detached, y)
        self.log(
            f"{label}_{metric.name}",
            loss_value,
            on_step=label == "train",
            on_epoch=True,
        )
    log = {"loss": loss, "n_samples": x["decoder_lengths"].size(0)}
    if self.log_interval(label == "train") > 0:
        self._log_prediction(x, out, batch_idx, label=label)
    return log, out
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def _log_prediction(self, x, out, batch_idx, label="train") -> None:
# log single prediction figure
log_interval = self.log_interval(label == "train")
if (batch_idx % log_interval == 0 or log_interval < 1.0) and log_interval > 0:
if log_interval < 1.0: # log multiple steps
log_indices = torch.arange(
0,
len(x["encoder_lengths"]),
max(1, round(log_interval * len(x["encoder_lengths"]))),
)
else:
log_indices = [0]
for idx in log_indices:
fig = self.plot_prediction(x, out, idx=idx, add_loss_to_title=True)
tag = f"{label.capitalize()} prediction"
if label == "train":
tag += f" of item {idx} in global batch {self.global_step}"
else:
tag += f" of item {idx} in batch {batch_idx}"
self.logger.experiment.add_figure(
tag,
fig,
global_step=self.global_step,
)
|
def _log_prediction(self, x, out, batch_idx, label="train"):
# log single prediction figure
log_interval = self.log_interval(label == "train")
if (batch_idx % log_interval == 0 or log_interval < 1.0) and log_interval > 0:
if log_interval < 1.0: # log multiple steps
log_indices = torch.arange(
0,
len(x["encoder_lengths"]),
max(1, round(log_interval * len(x["encoder_lengths"]))),
)
else:
log_indices = [0]
for idx in log_indices:
fig = self.plot_prediction(x, out, idx=idx, add_loss_to_title=True)
tag = f"{label.capitalize()} prediction"
if label == "train":
tag += f" of item {idx} in global batch {self.global_step}"
else:
tag += f" of item {idx} in batch {batch_idx}"
self.logger.experiment.add_figure(
tag,
fig,
global_step=self.global_step,
)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def plot_prediction(
    self,
    x: Dict[str, torch.Tensor],
    out: Dict[str, torch.Tensor],
    idx: int = 0,
    add_loss_to_title: Union[Metric, torch.Tensor, bool] = False,
    show_future_observed: bool = True,
    ax=None,
) -> plt.Figure:
    """
    Plot prediction of prediction vs actuals

    Args:
        x: network input
        out: network output
        idx: index of prediction to plot
        add_loss_to_title: if to add loss to title or loss function to calculate. Can be either metrics,
            bool indicating if to use loss metric or tensor which contains losses for all samples.
            Default to False.
        show_future_observed: if to show actuals for future. Defaults to True.
        ax: matplotlib axes to plot on

    Returns:
        matplotlib figure

    Raises:
        ValueError: if ``add_loss_to_title`` is neither a bool, a tensor nor a ``Metric``
    """
    # all true values for y of the first sample in batch
    y_all = torch.cat([x["encoder_target"][idx], x["decoder_target"][idx]])
    if y_all.ndim == 2:  # timesteps, (target, weight), i.e. weight is included
        y_all = y_all[:, 0]
    max_encoder_length = x["encoder_lengths"].max()
    # stitch the sample's observed encoder part to its observed decoder part,
    # skipping any encoder padding between them
    y = torch.cat(
        (
            y_all[: x["encoder_lengths"][idx]],
            y_all[
                max_encoder_length : (max_encoder_length + x["decoder_lengths"][idx])
            ],
        ),
    )
    # get predictions (only up to the sample's actual decoder length)
    y_pred = out["prediction"].detach().cpu()
    y_hat = y_pred[idx, : x["decoder_lengths"][idx]]
    # move to cpu
    y = y.detach().cpu()
    # create figure
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # time axis: history uses negative indices, predictions start at 0
    n_pred = y_hat.shape[0]
    x_obs = np.arange(-(y.shape[0] - n_pred), 0)
    x_pred = np.arange(n_pred)
    prop_cycle = iter(plt.rcParams["axes.prop_cycle"])
    obs_color = next(prop_cycle)["color"]
    pred_color = next(prop_cycle)["color"]
    # plot observed history
    if len(x_obs) > 0:
        if len(x_obs) > 1:
            plotter = ax.plot
        else:
            # a single point cannot be drawn as a line
            plotter = ax.scatter
        plotter(x_obs, y[:-n_pred], label="observed", c=obs_color)
    if len(x_pred) > 1:
        plotter = ax.plot
    else:
        plotter = ax.scatter
    # plot observed prediction
    if show_future_observed:
        plotter(x_pred, y[-n_pred:], label=None, c=obs_color)
    # plot prediction
    plotter(
        x_pred,
        self.loss.to_prediction(y_hat.unsqueeze(0))[0],
        label="predicted",
        c=pred_color,
    )
    # plot predicted quantiles (median plus symmetric bands around it)
    y_quantiles = self.loss.to_quantiles(y_hat.unsqueeze(0))[0]
    plotter(x_pred, y_quantiles[:, y_quantiles.shape[1] // 2], c=pred_color, alpha=0.15)
    for i in range(y_quantiles.shape[1] // 2):
        if len(x_pred) > 1:
            # shade between the i-th lowest and i-th highest quantile
            ax.fill_between(
                x_pred,
                y_quantiles[:, i],
                y_quantiles[:, -i - 1],
                alpha=0.15,
                fc=pred_color,
            )
        else:
            # single prediction point: draw the quantile band as an error bar
            quantiles = torch.tensor([[y_quantiles[0, i]], [y_quantiles[0, -i - 1]]])
            ax.errorbar(
                x_pred,
                y[[-n_pred]],
                yerr=quantiles - y[-n_pred],
                c=pred_color,
                capsize=1.0,
            )
    if add_loss_to_title is not False:
        # resolve what to show in the title: the model's own loss metric, a
        # precomputed per-sample loss tensor, or a user-supplied metric
        if isinstance(add_loss_to_title, bool):
            loss = self.loss
        elif isinstance(add_loss_to_title, torch.Tensor):
            loss = add_loss_to_title.detach()[idx].item()
        elif isinstance(add_loss_to_title, Metric):
            loss = add_loss_to_title
            loss.quantiles = self.loss.quantiles
        else:
            raise ValueError(f"add_loss_to_title '{add_loss_to_title}'' is unkown")
        if isinstance(loss, MASE):
            # MASE additionally needs the history for scaling
            loss_value = loss(y_hat[None], y[-n_pred:][None], y[:n_pred][None])
        elif isinstance(loss, Metric):
            loss_value = loss(y_hat[None], y[-n_pred:][None])
        else:
            loss_value = loss
        ax.set_title(f"Loss {loss_value:.3g}")
    ax.set_xlabel("Time index")
    fig.legend()
    return fig
|
def plot_prediction(
    self,
    x: Dict[str, torch.Tensor],
    out: Dict[str, torch.Tensor],
    idx: int = 0,
    add_loss_to_title: Union[Metric, torch.Tensor, bool] = False,
    show_future_observed: bool = True,
    ax=None,
) -> plt.Figure:
    """
    Plot prediction of prediction vs actuals

    Args:
        x: network input
        out: network output
        idx: index of prediction to plot
        add_loss_to_title: if to add loss to title or loss function to calculate. Can be either metrics,
            bool indicating if to use loss metric or tensor which contains losses for all samples.
            Default to False.
        show_future_observed: if to show actuals for future. Defaults to True.
        ax: matplotlib axes to plot on

    Returns:
        matplotlib figure

    Raises:
        ValueError: if ``add_loss_to_title`` is neither a bool, a tensor nor a ``Metric``
    """
    # all true values for y of the first sample in batch
    y_all = torch.cat([x["encoder_target"][idx], x["decoder_target"][idx]])
    if y_all.ndim == 2:  # timesteps, (target, weight), i.e. weight is included
        y_all = y_all[:, 0]
    max_encoder_length = x["encoder_lengths"].max()
    # stitch the sample's observed encoder part to its observed decoder part,
    # skipping any encoder padding between them
    y = torch.cat(
        (
            y_all[: x["encoder_lengths"][idx]],
            y_all[
                max_encoder_length : (max_encoder_length + x["decoder_lengths"][idx])
            ],
        ),
    )
    # get predictions (only up to the sample's actual decoder length)
    y_pred = out["prediction"].detach().cpu()
    y_hat = y_pred[idx, : x["decoder_lengths"][idx]]
    # move to cpu
    y = y.detach().cpu()
    # create figure
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # time axis: history uses negative indices, predictions start at 0
    n_pred = y_hat.shape[0]
    x_obs = np.arange(-(y.shape[0] - n_pred), 0)
    x_pred = np.arange(n_pred)
    prop_cycle = iter(plt.rcParams["axes.prop_cycle"])
    obs_color = next(prop_cycle)["color"]
    pred_color = next(prop_cycle)["color"]
    # plot observed history
    if len(x_obs) > 0:
        if len(x_obs) > 1:
            plotter = ax.plot
        else:
            # a single point cannot be drawn as a line
            plotter = ax.scatter
        plotter(x_obs, y[:-n_pred], label="observed", c=obs_color)
    if len(x_pred) > 1:
        plotter = ax.plot
    else:
        plotter = ax.scatter
    # plot observed prediction
    if show_future_observed:
        plotter(x_pred, y[-n_pred:], label=None, c=obs_color)
    # plot prediction
    plotter(
        x_pred,
        self.loss.to_prediction(y_hat.unsqueeze(0))[0],
        label="predicted",
        c=pred_color,
    )
    # plot predicted quantiles (median plus symmetric bands around it)
    y_quantiles = self.loss.to_quantiles(y_hat.unsqueeze(0))[0]
    plotter(x_pred, y_quantiles[:, y_quantiles.shape[1] // 2], c=pred_color, alpha=0.15)
    for i in range(y_quantiles.shape[1] // 2):
        if len(x_pred) > 1:
            # shade between the i-th lowest and i-th highest quantile
            ax.fill_between(
                x_pred,
                y_quantiles[:, i],
                y_quantiles[:, -i - 1],
                alpha=0.15,
                fc=pred_color,
            )
        else:
            # single prediction point: draw the quantile band as an error bar
            quantiles = torch.tensor([[y_quantiles[0, i]], [y_quantiles[0, -i - 1]]])
            ax.errorbar(
                x_pred,
                y[[-n_pred]],
                yerr=quantiles - y[-n_pred],
                c=pred_color,
                capsize=1.0,
            )
    # explicit `is not False` instead of truthiness: truthiness would be ambiguous
    # for multi-element tensors and we now also accept precomputed loss tensors
    if add_loss_to_title is not False:
        if isinstance(add_loss_to_title, bool):
            loss = self.loss
        elif isinstance(add_loss_to_title, torch.Tensor):
            # per-sample losses were precomputed - pick the one for this sample
            loss = add_loss_to_title.detach()[idx].item()
        elif isinstance(add_loss_to_title, Metric):
            loss = add_loss_to_title
            loss.quantiles = self.loss.quantiles
        else:
            raise ValueError(f"add_loss_to_title '{add_loss_to_title}' is unknown")
        if isinstance(loss, MASE):
            # MASE additionally needs the history for scaling
            loss_value = loss(y_hat[None], y[-n_pred:][None], y[:n_pred][None])
        elif isinstance(loss, Metric):
            loss_value = loss(y_hat[None], y[-n_pred:][None])
        else:
            loss_value = loss
        ax.set_title(f"Loss {loss_value:.3g}")
    ax.set_xlabel("Time index")
    fig.legend()
    return fig
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def predict(
    self,
    data: Union[DataLoader, pd.DataFrame, TimeSeriesDataSet],
    mode: Union[str, Tuple[str, str]] = "prediction",
    return_index: bool = False,
    return_decoder_lengths: bool = False,
    batch_size: int = 64,
    num_workers: int = 0,
    fast_dev_run: bool = False,
    show_progress_bar: bool = False,
    return_x: bool = False,
    **kwargs,
):
    """
    predict dataloader

    Args:
        data: dataloader, dataframe or dataset
        mode: one of "prediction", "quantiles" or "raw", or tuple ``("raw", output_name)`` where
            output_name is a name in the dictionary returned by ``forward()``
        return_index: if to return the prediction index
        return_decoder_lengths: if to return decoder_lengths
        batch_size: batch size for dataloader - only used if data is not a dataloader is passed
        num_workers: number of workers for dataloader - only used if data is not a dataloader is passed
        fast_dev_run: if to only return results of first batch
        show_progress_bar: if to show progress bar. Defaults to False.
        return_x: if to return network inputs
        **kwargs: additional arguments to network's forward method

    Returns:
        output, x, index, decoder_lengths: some elements might not be present depending on what is
            configured to be returned
    """
    # convert input to a dataloader if a dataframe or dataset was passed
    if isinstance(data, pd.DataFrame):
        data = TimeSeriesDataSet.from_parameters(
            self.dataset_parameters, data, predict=True
        )
    if isinstance(data, TimeSeriesDataSet):
        dataloader = data.to_dataloader(
            batch_size=batch_size, train=False, num_workers=num_workers
        )
    else:
        dataloader = data
    # ensure passed dataloader is correct
    assert isinstance(dataloader.dataset, TimeSeriesDataSet), (
        "dataset behind dataloader must be TimeSeriesDataSet"
    )
    # prepare model
    self.eval()  # no dropout, etc. no gradients
    # accumulators for per-batch results
    output = []
    decoder_length_list = []
    x_list = []
    index = []
    progress_bar = tqdm(
        desc="Predict",
        unit=" batches",
        total=len(dataloader),
        disable=not show_progress_bar,
    )
    with torch.no_grad():
        for x, _ in dataloader:
            # move data to appropriate device
            for name in x.keys():
                if x[name].device != self.device:
                    x[name] = x[name].to(self.device)
            # make prediction
            out = self(x, **kwargs)  # raw output is dictionary
            out["prediction"] = self.transform_output(out)

            lengths = x["decoder_lengths"]
            if return_decoder_lengths:
                decoder_length_list.append(lengths)
            # positions beyond each sample's decoder length are masked with NaN
            nan_mask = self._get_mask(out["prediction"].size(1), lengths)
            if isinstance(mode, (tuple, list)):
                if mode[0] == "raw":
                    out = out[mode[1]]
                else:
                    raise ValueError(
                        f"If a tuple is specified, the first element must be 'raw' - got {mode[0]} instead"
                    )
            elif mode == "prediction":
                out = self.loss.to_prediction(out["prediction"])
                # mask non-predictions
                out = out.masked_fill(nan_mask, torch.tensor(float("nan")))
            elif mode == "quantiles":
                out = self.loss.to_quantiles(out["prediction"])
                # mask non-predictions
                out = out.masked_fill(
                    nan_mask.unsqueeze(-1), torch.tensor(float("nan"))
                )
            elif mode == "raw":
                pass
            else:
                raise ValueError(f"Unknown mode {mode} - see docs for valid arguments")

            output.append(out)
            if return_x:
                x_list.append(x)
            if return_index:
                index.append(dataloader.dataset.x_to_index(x))
            progress_bar.update()
            if fast_dev_run:
                break
    # concatenate per-batch results (tensor for non-raw modes, dict of tensors for raw)
    if isinstance(mode, (tuple, list)) or mode != "raw":
        output = torch.cat(output, dim=0)
    elif mode == "raw":
        output_cat = {}
        for name in output[0].keys():
            output_cat[name] = torch.cat([out[name] for out in output], dim=0)
        output = output_cat
    # generate output: wrap in a list when extras are requested, then append them in order
    if return_x or return_index or return_decoder_lengths:
        output = [output]
    if return_x:
        x_cat = {}
        for name in x_list[0].keys():
            x_cat[name] = torch.cat([x[name] for x in x_list], dim=0)
        output.append(x_cat)
    if return_index:
        output.append(pd.concat(index, axis=0, ignore_index=True))
    if return_decoder_lengths:
        output.append(torch.cat(decoder_length_list, dim=0))
    return output
|
def predict(
    self,
    data: Union[DataLoader, pd.DataFrame, TimeSeriesDataSet],
    mode: Union[str, Tuple[str, str]] = "prediction",
    return_index: bool = False,
    return_decoder_lengths: bool = False,
    batch_size: int = 64,
    num_workers: int = 0,
    fast_dev_run: bool = False,
    show_progress_bar: bool = False,
    return_x: bool = False,
    **kwargs,
):
    """
    Run inference over a dataloader, dataframe or dataset.

    Args:
        data: dataloader, dataframe or dataset
        mode: one of "prediction", "quantiles" or "raw", or tuple ``("raw", output_name)`` where
            output_name is a name in the dictionary returned by ``forward()``
        return_index: if to return the prediction index
        return_decoder_lengths: if to return decoder_lengths
        batch_size: batch size for dataloader - only used if data is not a dataloader is passed
        num_workers: number of workers for dataloader - only used if data is not a dataloader is passed
        fast_dev_run: if to only return results of first batch
        show_progress_bar: if to show progress bar. Defaults to False.
        return_x: if to return network inputs
        **kwargs: additional arguments that are forwarded to the network's ``forward()`` method

    Returns:
        output, x, index, decoder_lengths: some elements might not be present depending on what is
            configured to be returned
    """
    # convert input to a dataloader (dataframe -> dataset -> dataloader)
    if isinstance(data, pd.DataFrame):
        data = TimeSeriesDataSet.from_parameters(
            self.dataset_parameters, data, predict=True
        )
    if isinstance(data, TimeSeriesDataSet):
        dataloader = data.to_dataloader(
            batch_size=batch_size, train=False, num_workers=num_workers
        )
    else:
        dataloader = data
    # ensure passed dataloader is correct
    assert isinstance(dataloader.dataset, TimeSeriesDataSet), (
        "dataset behind dataloader must be TimeSeriesDataSet"
    )
    # prepare model
    self.eval()  # no dropout, etc. no gradients
    # run predictions
    output = []
    decoder_lengths_list = []
    x_list = []
    index = []
    progress_bar = tqdm(
        desc="Predict",
        unit=" batches",
        total=len(dataloader),
        disable=not show_progress_bar,
    )
    with torch.no_grad():
        for x, _ in dataloader:
            # move data to appropriate device
            for name in x:
                if x[name].device != self.device:
                    x[name] = x[name].to(self.device)
            # make prediction
            out = self(x, **kwargs)  # raw output is dictionary
            out["prediction"] = self.transform_output(out)
            lengths = x["decoder_lengths"]
            if return_decoder_lengths:
                decoder_lengths_list.append(lengths)
            # mask positions beyond each sample's decoder length
            nan_mask = self._get_mask(out["prediction"].size(1), lengths)
            if isinstance(mode, (tuple, list)):
                if mode[0] == "raw":
                    out = out[mode[1]]
                else:
                    raise ValueError(
                        f"If a tuple is specified, the first element must be 'raw' - got {mode[0]} instead"
                    )
            elif mode == "prediction":
                out = self.loss.to_prediction(out["prediction"])
                # mask non-predictions
                out = out.masked_fill(nan_mask, torch.tensor(float("nan")))
            elif mode == "quantiles":
                out = self.loss.to_quantiles(out["prediction"])
                # mask non-predictions
                out = out.masked_fill(
                    nan_mask.unsqueeze(-1), torch.tensor(float("nan"))
                )
            elif mode == "raw":
                pass
            else:
                raise ValueError(f"Unknown mode {mode} - see docs for valid arguments")
            output.append(out)
            if return_x:
                x_list.append(x)
            if return_index:
                index.append(dataloader.dataset.x_to_index(x))
            progress_bar.update()
            if fast_dev_run:
                break
    progress_bar.close()  # release the progress-bar resources
    # concatenate results over batches
    if isinstance(mode, (tuple, list)) or mode != "raw":
        output = torch.cat(output, dim=0)
    elif mode == "raw":
        # raw output is a dict of tensors - concatenate each entry separately
        output = {
            name: torch.cat([out[name] for out in output], dim=0)
            for name in output[0].keys()
        }
    # generate output
    if return_x or return_index or return_decoder_lengths:
        output = [output]
    if return_x:
        output.append(
            {
                name: torch.cat([x[name] for x in x_list], dim=0)
                for name in x_list[0].keys()
            }
        )
    if return_index:
        output.append(pd.concat(index, axis=0, ignore_index=True))
    if return_decoder_lengths:
        output.append(torch.cat(decoder_lengths_list, dim=0))
    return output
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs) -> LightningModule:
    """
    Instantiate the model from a timeseries dataset.

    Args:
        dataset: timeseries dataset
        **kwargs: additional arguments such as hyperparameters for model (see ``__init__()``)

    Returns:
        LightningModule
    """
    # default the prediction target to the one configured on the dataset
    if "target" not in kwargs:
        kwargs["target"] = dataset.target
    return super().from_dataset(dataset, **kwargs)
|
def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs) -> LightningModule:
    """
    Create model from dataset, i.e. save dataset parameters in model

    This function should be called as ``super().from_dataset()`` in a derived models that implement it

    Args:
        dataset (TimeSeriesDataSet): timeseries dataset

    Returns:
        BaseModel: Model that can be trained
    """
    # default the output transformer to the dataset's target normalizer
    kwargs.setdefault("output_transformer", dataset.target_normalizer)
    model = cls(**kwargs)
    # remember the dataset configuration so predictions can rebuild a matching dataset
    model.dataset_parameters = dataset.get_parameters()
    return model
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def calculate_prediction_actual_by_variable(
        self,
        x: Dict[str, torch.Tensor],
        y_pred: torch.Tensor,
        normalize: bool = True,
        bins: int = 95,
        std: float = 2.0,
    ) -> Dict[str, Dict[str, torch.Tensor]]:
        """
        Calculate predictions and actuals by variable averaged by ``bins`` bins spanning from ``-std`` to ``+std``

        Args:
            x: input as ``forward()``
            y_pred: predictions obtained by ``self.transform_output(self(x, **kwargs))``
            normalize: if to return normalized averages, i.e. mean or sum of ``y``
            bins: number of bins to calculate
            std: number of standard deviations for standard scaled continuous variables

        Returns:
            dictionary that can be used to plot averages with :py:meth:`~plot_prediction_actual_by_variable`
        """
        support = {}  # histogram: number of observations per bin and variable
        # averages of actuals and predictions per bin and variable
        averages_actual = {}
        averages_prediction = {}
        # mask values and transform to log space
        # NOTE(review): variable name is misleading - this is the maximum *decoder* length
        max_encoder_length = x["decoder_lengths"].max()
        # inverse=True presumably selects valid (non-padded) decoder positions - TODO confirm
        mask = self._get_mask(max_encoder_length, x["decoder_lengths"], inverse=True)
        # select valid y values
        y_flat = x["decoder_target"][mask]
        y_pred_flat = y_pred[mask]
        # work in log space if the dataset target was log-scaled
        log_y = self.dataset_parameters["target_normalizer"] is not None and getattr(
            self.dataset_parameters["target_normalizer"], "log_scale", False
        )
        if log_y:
            y_flat = torch.log(y_flat + 1e-8)
            y_pred_flat = torch.log(y_pred_flat + 1e-8)
        # real bins: number of bins on each side of zero
        positive_bins = (bins - 1) // 2
        # if to normalize
        if normalize:
            reduction = "mean"
        else:
            reduction = "sum"
        # continuous variables: bin values into integer bins in [0, bins) and aggregate
        # assumes decoder_cont is roughly standard-scaled - TODO confirm
        reals = x["decoder_cont"]
        for idx, name in enumerate(self.hparams.x_reals):
            averages_actual[name], support[name] = groupby_apply(
                (reals[..., idx][mask] * positive_bins / std)
                .round()
                .clamp(-positive_bins, positive_bins)
                .long()
                + positive_bins,
                y_flat,
                bins=bins,
                reduction=reduction,
                return_histogram=True,
            )
            averages_prediction[name], _ = groupby_apply(
                (reals[..., idx][mask] * positive_bins / std)
                .round()
                .clamp(-positive_bins, positive_bins)
                .long()
                + positive_bins,
                y_pred_flat,
                bins=bins,
                reduction=reduction,
                return_histogram=True,
            )
        # categorical_variables: aggregate by category index
        cats = x["decoder_cat"]
        for idx, name in enumerate(
            self.hparams.x_categoricals
        ):  # todo: make it work for grouped categoricals
            reduction = "sum"  # always sum first; the mean is applied after accumulation below
            name = self.categorical_groups_mapping.get(name, name)
            averages_actual_cat, support_cat = groupby_apply(
                cats[..., idx][mask],
                y_flat,
                bins=self.hparams.embedding_sizes[name][0],
                reduction=reduction,
                return_histogram=True,
            )
            averages_prediction_cat, _ = groupby_apply(
                cats[..., idx][mask],
                y_pred_flat,
                bins=self.hparams.embedding_sizes[name][0],
                reduction=reduction,
                return_histogram=True,
            )
            # add to existing calculations (grouped categoricals map to the same name)
            # or initialize them
            if name in averages_actual:
                averages_actual[name] += averages_actual_cat
                support[name] += support_cat
                averages_prediction[name] += averages_prediction_cat
            else:
                averages_actual[name] = averages_actual_cat
                support[name] = support_cat
                averages_prediction[name] = averages_prediction_cat
        if normalize:  # run reduction for categoricals
            for name in self.hparams.embedding_sizes.keys():
                averages_actual[name] /= support[name].clamp(min=1)
                averages_prediction[name] /= support[name].clamp(min=1)
        if log_y:  # reverse log scaling
            for name in support.keys():
                averages_actual[name] = torch.exp(averages_actual[name])
                averages_prediction[name] = torch.exp(averages_prediction[name])
        return {
            "support": support,
            "average": {"actual": averages_actual, "prediction": averages_prediction},
            "std": std,
        }
|
def calculate_prediction_actual_by_variable(
        self,
        x: Dict[str, torch.Tensor],
        y_pred: torch.Tensor,
        normalize: bool = True,
        bins: int = 95,
        std: float = 2.0,
    ) -> Dict[str, Dict[str, torch.Tensor]]:
        """
        Calculate predictions and actuals by variable averaged by ``bins`` bins spanning from ``-std`` to ``+std``

        Args:
            x: input as ``forward()``
            y_pred: predictions obtained by ``self.transform_output(self(x))``
            normalize: if to return normalized averages, i.e. mean or sum of ``y``
            bins: number of bins to calculate
            std: number of standard deviations for standard scaled continuous variables

        Returns:
            dictionary that can be used to plot averages with :py:meth:`~plot_prediction_actual_by_variable`
        """
        support = {}  # histogram: number of observations per bin and variable
        # averages of actuals and predictions per bin and variable
        averages_actual = {}
        averages_prediction = {}
        # mask values and transform to log space
        # NOTE(review): variable name is misleading - this is the maximum *decoder* length
        max_encoder_length = x["decoder_lengths"].max()
        # inverse=True presumably selects valid (non-padded) decoder positions - TODO confirm
        mask = self._get_mask(max_encoder_length, x["decoder_lengths"], inverse=True)
        # select valid y values
        y_flat = x["decoder_target"][mask]
        y_pred_flat = y_pred[mask]
        # work in log space if the dataset target was log-scaled
        log_y = self.dataset_parameters["target_normalizer"] is not None and getattr(
            self.dataset_parameters["target_normalizer"], "log_scale", False
        )
        if log_y:
            y_flat = torch.log(y_flat + 1e-8)
            y_pred_flat = torch.log(y_pred_flat + 1e-8)
        # real bins: number of bins on each side of zero
        positive_bins = (bins - 1) // 2
        # if to normalize
        if normalize:
            reduction = "mean"
        else:
            reduction = "sum"
        # continuous variables: bin values into integer bins in [0, bins) and aggregate
        # assumes decoder_cont is roughly standard-scaled - TODO confirm
        reals = x["decoder_cont"]
        for idx, name in enumerate(self.hparams.x_reals):
            averages_actual[name], support[name] = groupby_apply(
                (reals[..., idx][mask] * positive_bins / std)
                .round()
                .clamp(-positive_bins, positive_bins)
                .long()
                + positive_bins,
                y_flat,
                bins=bins,
                reduction=reduction,
                return_histogram=True,
            )
            averages_prediction[name], _ = groupby_apply(
                (reals[..., idx][mask] * positive_bins / std)
                .round()
                .clamp(-positive_bins, positive_bins)
                .long()
                + positive_bins,
                y_pred_flat,
                bins=bins,
                reduction=reduction,
                return_histogram=True,
            )
        # categorical_variables: aggregate by category index
        cats = x["decoder_cat"]
        for idx, name in enumerate(
            self.hparams.x_categoricals
        ):  # todo: make it work for grouped categoricals
            reduction = "sum"  # always sum first; the mean is applied after accumulation below
            name = self.categorical_groups_mapping.get(name, name)
            averages_actual_cat, support_cat = groupby_apply(
                cats[..., idx][mask],
                y_flat,
                bins=self.hparams.embedding_sizes[name][0],
                reduction=reduction,
                return_histogram=True,
            )
            averages_prediction_cat, _ = groupby_apply(
                cats[..., idx][mask],
                y_pred_flat,
                bins=self.hparams.embedding_sizes[name][0],
                reduction=reduction,
                return_histogram=True,
            )
            # add to existing calculations (grouped categoricals map to the same name)
            # or initialize them
            if name in averages_actual:
                averages_actual[name] += averages_actual_cat
                support[name] += support_cat
                averages_prediction[name] += averages_prediction_cat
            else:
                averages_actual[name] = averages_actual_cat
                support[name] = support_cat
                averages_prediction[name] = averages_prediction_cat
        if normalize:  # run reduction for categoricals
            for name in self.hparams.embedding_sizes.keys():
                averages_actual[name] /= support[name].clamp(min=1)
                averages_prediction[name] /= support[name].clamp(min=1)
        if log_y:  # reverse log scaling
            for name in support.keys():
                averages_actual[name] = torch.exp(averages_actual[name])
                averages_prediction[name] = torch.exp(averages_prediction[name])
        return {
            "support": support,
            "average": {"actual": averages_actual, "prediction": averages_prediction},
            "std": std,
        }
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def plot_interpretation(
    self,
    x: Dict[str, torch.Tensor],
    output: Dict[str, torch.Tensor],
    idx: int,
    ax=None,
    plot_seasonality_and_generic_on_secondary_axis: bool = False,
) -> plt.Figure:
    """
    Plot interpretation.

    Plot two panels: prediction and backcast vs actuals and
    decomposition of prediction into trend, seasonality and generic forecast.

    Args:
        x (Dict[str, torch.Tensor]): network input
        output (Dict[str, torch.Tensor]): network output
        idx (int): index of sample for which to plot the interpretation.
        ax (List[matplotlib axes], optional): list of two matplotlib axes onto which to plot the interpretation.
            Defaults to None.
        plot_seasonality_and_generic_on_secondary_axis (bool, optional): if to plot seasonality and
            generic forecast on secondary axis in second panel. Defaults to False.

    Returns:
        plt.Figure: matplotlib figure
    """
    if ax is None:
        fig, ax = plt.subplots(2, 1, figsize=(6, 8))
    else:
        fig = ax.get_figure()
    # shared time axis: negative steps are the backcast, positive steps the forecast
    time = torch.arange(-self.hparams.context_length, self.hparams.prediction_length)

    def to_prediction(y):
        # rescale raw network output for the selected sample back to target scale
        return self.transform_output(
            dict(prediction=y[[idx]], target_scale=x["target_scale"][[idx]])
        )[0]

    # first panel: observed target vs backcast + forecast (moved to CPU for matplotlib)
    target_series = torch.cat([x["encoder_target"][idx], x["decoder_target"][idx]]).cpu()
    ax[0].plot(time, target_series, label="target")
    predicted_series = torch.cat(
        [
            to_prediction(output["backcast"].detach()),
            output["prediction"][idx].detach(),
        ],
        dim=0,
    ).cpu()
    ax[0].plot(time, predicted_series, label="prediction")
    ax[0].set_xlabel("Time")
    # second panel: decomposition into the configured stack types
    color_cycle = iter(plt.rcParams["axes.prop_cycle"])
    next(color_cycle)  # prediction
    next(color_cycle)  # observations
    if plot_seasonality_and_generic_on_secondary_axis:
        secondary_ax = ax[1].twinx()
        secondary_ax.set_ylabel("Seasonality / Generic")
    else:
        secondary_ax = ax[1]
    for title in ["trend", "seasonality", "generic"]:
        if title not in self.hparams.stack_types:
            continue
        # trend goes on the primary axis, everything else on the (possibly secondary) axis
        target_ax = ax[1] if title == "trend" else secondary_ax
        target_ax.plot(
            time,
            to_prediction(output[title]).cpu(),
            label=title.capitalize(),
            c=next(color_cycle)["color"],
        )
    ax[1].set_xlabel("Time")
    ax[1].set_ylabel("Decomposition")
    fig.legend()
    return fig
|
def plot_interpretation(
    self,
    x: Dict[str, torch.Tensor],
    output: Dict[str, torch.Tensor],
    idx: int,
    ax=None,
    plot_seasonality_and_generic_on_secondary_axis: bool = False,
) -> plt.Figure:
    """
    Plot interpretation.

    Plot two panels: prediction and backcast vs actuals and
    decomposition of prediction into trend, seasonality and generic forecast.

    Args:
        x (Dict[str, torch.Tensor]): network input
        output (Dict[str, torch.Tensor]): network output
        idx (int): index of sample for which to plot the interpretation.
        ax (List[matplotlib axes], optional): list of two matplotlib axes onto which to plot the interpretation.
            Defaults to None.
        plot_seasonality_and_generic_on_secondary_axis (bool, optional): if to plot seasonality and
            generic forecast on secondary axis in second panel. Defaults to False.

    Returns:
        plt.Figure: matplotlib figure
    """
    if ax is None:
        fig, ax = plt.subplots(2, 1, figsize=(6, 8))
    else:
        fig = ax.get_figure()
    # shared time axis: negative steps are the backcast, positive steps the forecast
    time = torch.arange(-self.hparams.context_length, self.hparams.prediction_length)

    def to_prediction(y):
        # rescale raw network output for the selected sample back to target scale
        return self.transform_output(
            dict(prediction=y[[idx]], target_scale=x["target_scale"][[idx]])
        )[0]

    # plot target vs prediction
    # move tensors to CPU before handing them to matplotlib - plotting CUDA tensors
    # directly raises an error because they cannot be converted to numpy
    ax[0].plot(
        time,
        torch.cat([x["encoder_target"][idx], x["decoder_target"][idx]]).cpu(),
        label="target",
    )
    ax[0].plot(
        time,
        torch.cat(
            [
                to_prediction(output["backcast"].detach()),
                output["prediction"][idx].detach(),
            ],
            dim=0,
        ).cpu(),
        label="prediction",
    )
    ax[0].set_xlabel("Time")
    # plot blocks
    prop_cycle = iter(plt.rcParams["axes.prop_cycle"])
    next(prop_cycle)  # prediction
    next(prop_cycle)  # observations
    if plot_seasonality_and_generic_on_secondary_axis:
        ax2 = ax[1].twinx()
        ax2.set_ylabel("Seasonality / Generic")
    else:
        ax2 = ax[1]
    for title in ["trend", "seasonality", "generic"]:
        if title not in self.hparams.stack_types:
            continue
        if title == "trend":
            ax[1].plot(
                time,
                to_prediction(output[title]).cpu(),
                label=title.capitalize(),
                c=next(prop_cycle)["color"],
            )
        else:
            ax2.plot(
                time,
                to_prediction(output[title]).cpu(),
                label=title.capitalize(),
                c=next(prop_cycle)["color"],
            )
    ax[1].set_xlabel("Time")
    ax[1].set_ylabel("Decomposition")
    fig.legend()
    return fig
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
    def __init__(
        self,
        hidden_size: int = 16,
        lstm_layers: int = 1,
        dropout: float = 0.1,
        output_size: int = 7,
        loss: MultiHorizonMetric = None,
        attention_head_size: int = 4,
        max_encoder_length: int = 10,
        static_categoricals: List[str] = [],
        static_reals: List[str] = [],
        time_varying_categoricals_encoder: List[str] = [],
        time_varying_categoricals_decoder: List[str] = [],
        categorical_groups: Dict[str, List[str]] = {},
        time_varying_reals_encoder: List[str] = [],
        time_varying_reals_decoder: List[str] = [],
        x_reals: List[str] = [],
        x_categoricals: List[str] = [],
        hidden_continuous_size: int = 8,
        hidden_continuous_sizes: Dict[str, int] = {},
        embedding_sizes: Dict[str, Tuple[int, int]] = {},
        embedding_paddings: List[str] = [],
        embedding_labels: Dict[str, np.ndarray] = {},
        learning_rate: float = 1e-3,
        log_interval: Union[int, float] = -1,
        log_val_interval: Union[int, float] = None,
        log_gradient_flow: bool = False,
        reduce_on_plateau_patience: int = 1000,
        monotone_constaints: Dict[str, int] = {},
        share_single_variable_networks: bool = False,
        logging_metrics: nn.ModuleList = None,
        **kwargs,
    ):
        """
        Temporal Fusion Transformer for forecasting timeseries - use its :py:meth:`~from_dataset` method if possible.
        Implementation of the article
        `Temporal Fusion Transformers for Interpretable Multi-horizon Time Series
        Forecasting <https://arxiv.org/pdf/1912.09363.pdf>`_. The network outperforms DeepAR by Amazon by 36-69%
        in benchmarks.
        Enhancements compared to the original implementation (apart from capabilities added through base model
        such as monotone constraints):
        * static variables can be continuous
        * multiple categorical variables can be summarized with an EmbeddingBag
        * variable encoder and decoder length by sample
        * categorical embeddings are not transformed by variable selection network (because it is a redundant operation)
        * variable dimension in variable selection network are scaled up via linear interpolation to reduce
          number of parameters
        * non-linear variable processing in variable selection network can be shared among decoder and encoder
          (not shared by default)
        Tune its hyperparameters with
        :py:func:`~pytorch_forecasting.models.temporal_fusion_transformer.tuning.optimize_hyperparameters`.
        Args:
            hidden_size: hidden size of network which is its main hyperparameter and can range from 8 to 512
            lstm_layers: number of LSTM layers (2 is mostly optimal)
            dropout: dropout rate
            output_size: number of outputs (e.g. number of quantiles for QuantileLoss)
            loss: loss function taking prediction and targets
            attention_head_size: number of attention heads (4 is a good default)
            max_encoder_length: length to encode (can be far longer than the decoder length but does not have to be)
            static_categoricals: integer of positions of static categorical variables
            static_reals: integer of positions of static continuous variables
            time_varying_categoricals_encoder: integer of positions of categorical variables for encoder
            time_varying_categoricals_decoder: integer of positions of categorical variables for decoder
            time_varying_reals_encoder: integer of positions of continuous variables for encoder
            time_varying_reals_decoder: integer of positions of continuous variables for decoder
            categorical_groups: dictionary where values
                are list of categorical variables that are forming together a new categorical
                variable which is the key in the dictionary
            x_reals: order of continuous variables in tensor passed to forward function
            x_categoricals: order of categorical variables in tensor passed to forward function
            hidden_continuous_size: default for hidden size for processing continous variables (similar to categorical
                embedding size)
            hidden_continuous_sizes: dictionary mapping continuous input indices to sizes for variable selection
                (fallback to hidden_continuous_size if index is not in dictionary)
            embedding_sizes: dictionary mapping (string) indices to tuple of number of categorical classes and
                embedding size
            embedding_paddings: list of indices for embeddings which transform the zero's embedding to a zero vector
            embedding_labels: dictionary mapping (string) indices to list of categorical labels
            learning_rate: learning rate
            log_interval: log predictions every x batches, do not log if 0 or less, log interpretation if > 0. If < 1.0
                , will log multiple entries per batch. Defaults to -1.
            log_val_interval: frequency with which to log validation set metrics, defaults to log_interval
            log_gradient_flow: if to log gradient flow, this takes time and should be only done to diagnose training
                failures
            reduce_on_plateau_patience (int): patience after which learning rate is reduced by a factor of 10
            monotone_constaints (Dict[str, int]): dictionary of monotonicity constraints for continuous decoder
                variables mapping
                position (e.g. ``"0"`` for first position) to constraint (``-1`` for negative and ``+1`` for positive,
                larger numbers add more weight to the constraint vs. the loss but are usually not necessary).
                This constraint significantly slows down training. Defaults to {}.
            share_single_variable_networks (bool): if to share the single variable networks between the encoder and
                decoder. Defaults to False.
            logging_metrics (nn.ModuleList[MultiHorizonMetric]): list of metrics that are logged during training.
                Defaults to nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE()]).
            **kwargs: additional arguments to :py:class:`~BaseModel`.
        """
        # NOTE(review): the [] / {} parameter defaults above are shared mutable objects;
        # they appear to be only read here (captured by save_hyperparameters), but
        # None sentinels would be safer -- confirm nothing downstream mutates hparams.
        if logging_metrics is None:
            logging_metrics = nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE()])
        if loss is None:
            loss = QuantileLoss()
        # capture all constructor arguments into self.hparams before building modules
        self.save_hyperparameters()
        # store loss function separately as it is a module
        assert isinstance(loss, MultiHorizonMetric), (
            "Loss has to of class `MultiHorizonMetric`"
        )
        super().__init__(loss=loss, logging_metrics=logging_metrics, **kwargs)
        # processing inputs
        # embeddings
        self.input_embeddings = MultiEmbedding(
            embedding_sizes=self.hparams.embedding_sizes,
            categorical_groups=self.hparams.categorical_groups,
            embedding_paddings=self.hparams.embedding_paddings,
            x_categoricals=self.hparams.x_categoricals,
            max_embedding_size=self.hparams.hidden_size,
        )
        # continuous variable processing
        # one per-variable linear "prescaler" lifts each scalar real to its hidden size
        # NOTE(review): `self.reals` is presumably a property of the base model listing
        # all continuous inputs -- confirm it covers every real used by the selection networks
        self.prescalers = nn.ModuleDict(
            {
                name: nn.Linear(
                    1,
                    self.hparams.hidden_continuous_sizes.get(
                        name, self.hparams.hidden_continuous_size
                    ),
                )
                for name in self.reals
            }
        )
        # variable selection
        # variable selection for static variables
        static_input_sizes = {
            name: self.hparams.embedding_sizes[name][1]
            for name in self.hparams.static_categoricals
        }
        static_input_sizes.update(
            {
                name: self.hparams.hidden_continuous_sizes.get(
                    name, self.hparams.hidden_continuous_size
                )
                for name in self.hparams.static_reals
            }
        )
        self.static_variable_selection = VariableSelectionNetwork(
            input_sizes=static_input_sizes,
            hidden_size=self.hparams.hidden_size,
            input_embedding_flags={name: True for name in self.hparams.static_categoricals},
            dropout=self.hparams.dropout,
            prescalers=self.prescalers,
        )
        # variable selection for encoder and decoder
        encoder_input_sizes = {
            name: self.hparams.embedding_sizes[name][1]
            for name in self.hparams.time_varying_categoricals_encoder
        }
        encoder_input_sizes.update(
            {
                name: self.hparams.hidden_continuous_sizes.get(
                    name, self.hparams.hidden_continuous_size
                )
                for name in self.hparams.time_varying_reals_encoder
            }
        )
        decoder_input_sizes = {
            name: self.hparams.embedding_sizes[name][1]
            for name in self.hparams.time_varying_categoricals_decoder
        }
        decoder_input_sizes.update(
            {
                name: self.hparams.hidden_continuous_sizes.get(
                    name, self.hparams.hidden_continuous_size
                )
                for name in self.hparams.time_varying_reals_decoder
            }
        )
        # create single variable grns that are shared across decoder and encoder
        if self.hparams.share_single_variable_networks:
            self.shared_single_variable_grns = nn.ModuleDict()
            for name, input_size in encoder_input_sizes.items():
                self.shared_single_variable_grns[name] = GatedResidualNetwork(
                    input_size,
                    min(input_size, self.hparams.hidden_size),
                    self.hparams.hidden_size,
                    self.hparams.dropout,
                )
            for name, input_size in decoder_input_sizes.items():
                if name not in self.shared_single_variable_grns:
                    self.shared_single_variable_grns[name] = GatedResidualNetwork(
                        input_size,
                        min(input_size, self.hparams.hidden_size),
                        self.hparams.hidden_size,
                        self.hparams.dropout,
                    )
        self.encoder_variable_selection = VariableSelectionNetwork(
            input_sizes=encoder_input_sizes,
            hidden_size=self.hparams.hidden_size,
            input_embedding_flags={
                name: True for name in self.hparams.time_varying_categoricals_encoder
            },
            dropout=self.hparams.dropout,
            context_size=self.hparams.hidden_size,
            prescalers=self.prescalers,
            single_variable_grns={}
            if not self.hparams.share_single_variable_networks
            else self.shared_single_variable_grns,
        )
        self.decoder_variable_selection = VariableSelectionNetwork(
            input_sizes=decoder_input_sizes,
            hidden_size=self.hparams.hidden_size,
            input_embedding_flags={
                name: True for name in self.hparams.time_varying_categoricals_decoder
            },
            dropout=self.hparams.dropout,
            context_size=self.hparams.hidden_size,
            prescalers=self.prescalers,
            single_variable_grns={}
            if not self.hparams.share_single_variable_networks
            else self.shared_single_variable_grns,
        )
        # static encoders
        # for variable selection
        self.static_context_variable_selection = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # for hidden state of the lstm
        self.static_context_initial_hidden_lstm = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # for cell state of the lstm
        self.static_context_initial_cell_lstm = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # for post lstm static enrichment
        self.static_context_enrichment = GatedResidualNetwork(
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            self.hparams.dropout,
        )
        # lstm encoder (history) and decoder (future) for local processing
        self.lstm_encoder = nn.LSTM(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            num_layers=self.hparams.lstm_layers,
            dropout=self.hparams.dropout if self.hparams.lstm_layers > 1 else 0,
            batch_first=True,
        )
        self.lstm_decoder = nn.LSTM(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            num_layers=self.hparams.lstm_layers,
            dropout=self.hparams.dropout if self.hparams.lstm_layers > 1 else 0,
            batch_first=True,
        )
        # skip connection for lstm
        self.post_lstm_gate_encoder = GatedLinearUnit(
            self.hparams.hidden_size, dropout=self.hparams.dropout
        )
        # encoder and decoder intentionally share the gate / add-norm modules
        self.post_lstm_gate_decoder = self.post_lstm_gate_encoder
        # self.post_lstm_gate_decoder = GatedLinearUnit(self.hparams.hidden_size, dropout=self.hparams.dropout)
        self.post_lstm_add_norm_encoder = AddNorm(
            self.hparams.hidden_size, trainable_add=False
        )
        # self.post_lstm_add_norm_decoder = AddNorm(self.hparams.hidden_size, trainable_add=True)
        self.post_lstm_add_norm_decoder = self.post_lstm_add_norm_encoder
        # static enrichment and processing past LSTM
        self.static_enrichment = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
            context_size=self.hparams.hidden_size,
        )
        # attention for long-range processing
        self.multihead_attn = InterpretableMultiHeadAttention(
            d_model=self.hparams.hidden_size,
            n_head=self.hparams.attention_head_size,
            dropout=self.hparams.dropout,
        )
        self.post_attn_gate_norm = GateAddNorm(
            self.hparams.hidden_size, dropout=self.hparams.dropout, trainable_add=False
        )
        self.pos_wise_ff = GatedResidualNetwork(
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # output processing -> no dropout at this late stage
        self.pre_output_gate_norm = GateAddNorm(
            self.hparams.hidden_size, dropout=None, trainable_add=False
        )
        self.output_layer = nn.Linear(self.hparams.hidden_size, self.hparams.output_size)
|
    def __init__(
        self,
        hidden_size: int = 16,
        lstm_layers: int = 1,
        dropout: float = 0.1,
        output_size: int = 7,
        loss: MultiHorizonMetric = None,
        attention_head_size: int = 4,
        max_encoder_length: int = 10,
        static_categoricals: List[str] = [],
        static_reals: List[str] = [],
        time_varying_categoricals_encoder: List[str] = [],
        time_varying_categoricals_decoder: List[str] = [],
        categorical_groups: Dict[str, List[str]] = {},
        time_varying_reals_encoder: List[str] = [],
        time_varying_reals_decoder: List[str] = [],
        x_reals: List[str] = [],
        x_categoricals: List[str] = [],
        hidden_continuous_size: int = 8,
        hidden_continuous_sizes: Dict[str, int] = {},
        embedding_sizes: Dict[str, Tuple[int, int]] = {},
        embedding_paddings: List[str] = [],
        embedding_labels: Dict[str, np.ndarray] = {},
        learning_rate: float = 1e-3,
        log_interval: Union[int, float] = -1,
        log_val_interval: Union[int, float] = None,
        log_gradient_flow: bool = False,
        reduce_on_plateau_patience: int = 1000,
        monotone_constaints: Dict[str, int] = {},
        share_single_variable_networks: bool = False,
        logging_metrics: nn.ModuleList = None,
        **kwargs,
    ):
        """
        Temporal Fusion Transformer for forecasting timeseries - use its :py:meth:`~from_dataset` method if possible.
        Implementation of the article
        `Temporal Fusion Transformers for Interpretable Multi-horizon Time Series
        Forecasting <https://arxiv.org/pdf/1912.09363.pdf>`_. The network outperforms DeepAR by Amazon by 36-69%
        in benchmarks.
        Enhancements compared to the original implementation (apart from capabilities added through base model
        such as monotone constraints):
        * static variables can be continuous
        * multiple categorical variables can be summarized with an EmbeddingBag
        * variable encoder and decoder length by sample
        * categorical embeddings are not transformed by variable selection network (because it is a redundant operation)
        * variable dimension in variable selection network are scaled up via linear interpolation to reduce
          number of parameters
        * non-linear variable processing in variable selection network can be shared among decoder and encoder
          (not shared by default)
        Tune its hyperparameters with
        :py:func:`~pytorch_forecasting.models.temporal_fusion_transformer.tuning.optimize_hyperparameters`.
        Args:
            hidden_size: hidden size of network which is its main hyperparameter and can range from 8 to 512
            lstm_layers: number of LSTM layers (2 is mostly optimal)
            dropout: dropout rate
            output_size: number of outputs (e.g. number of quantiles for QuantileLoss)
            loss: loss function taking prediction and targets
            attention_head_size: number of attention heads (4 is a good default)
            max_encoder_length: length to encode (can be far longer than the decoder length but does not have to be)
            static_categoricals: integer of positions of static categorical variables
            static_reals: integer of positions of static continuous variables
            time_varying_categoricals_encoder: integer of positions of categorical variables for encoder
            time_varying_categoricals_decoder: integer of positions of categorical variables for decoder
            time_varying_reals_encoder: integer of positions of continuous variables for encoder
            time_varying_reals_decoder: integer of positions of continuous variables for decoder
            categorical_groups: dictionary where values
                are list of categorical variables that are forming together a new categorical
                variable which is the key in the dictionary
            x_reals: order of continuous variables in tensor passed to forward function
            x_categoricals: order of categorical variables in tensor passed to forward function
            hidden_continuous_size: default for hidden size for processing continous variables (similar to categorical
                embedding size)
            hidden_continuous_sizes: dictionary mapping continuous input indices to sizes for variable selection
                (fallback to hidden_continuous_size if index is not in dictionary)
            embedding_sizes: dictionary mapping (string) indices to tuple of number of categorical classes and
                embedding size
            embedding_paddings: list of indices for embeddings which transform the zero's embedding to a zero vector
            embedding_labels: dictionary mapping (string) indices to list of categorical labels
            learning_rate: learning rate
            log_interval: log predictions every x batches, do not log if 0 or less, log interpretation if > 0. If < 1.0
                , will log multiple entries per batch. Defaults to -1.
            log_val_interval: frequency with which to log validation set metrics, defaults to log_interval
            log_gradient_flow: if to log gradient flow, this takes time and should be only done to diagnose training
                failures
            reduce_on_plateau_patience (int): patience after which learning rate is reduced by a factor of 10
            monotone_constaints (Dict[str, int]): dictionary of monotonicity constraints for continuous decoder
                variables mapping
                position (e.g. ``"0"`` for first position) to constraint (``-1`` for negative and ``+1`` for positive,
                larger numbers add more weight to the constraint vs. the loss but are usually not necessary).
                This constraint significantly slows down training. Defaults to {}.
            share_single_variable_networks (bool): if to share the single variable networks between the encoder and
                decoder. Defaults to False.
            logging_metrics (nn.ModuleList[MultiHorizonMetric]): list of metrics that are logged during training.
                Defaults to nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE()]).
            **kwargs: additional arguments to :py:class:`~BaseModel`.
        """
        # NOTE(review): the [] / {} parameter defaults above are shared mutable objects;
        # they appear to be only read here (captured by save_hyperparameters), but
        # None sentinels would be safer -- confirm nothing downstream mutates hparams.
        if logging_metrics is None:
            logging_metrics = nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE()])
        if loss is None:
            loss = QuantileLoss()
        # capture all constructor arguments into self.hparams before building modules
        self.save_hyperparameters()
        # store loss function separately as it is a module
        assert isinstance(loss, MultiHorizonMetric), (
            "Loss has to of class `MultiHorizonMetric`"
        )
        super().__init__(loss=loss, logging_metrics=logging_metrics, **kwargs)
        # processing inputs
        # embeddings
        self.input_embeddings = MultiEmbedding(
            embedding_sizes=self.hparams.embedding_sizes,
            categorical_groups=self.hparams.categorical_groups,
            embedding_paddings=self.hparams.embedding_paddings,
            x_categoricals=self.hparams.x_categoricals,
            max_embedding_size=self.hparams.hidden_size,
        )
        # continuous variable processing
        # one per-variable linear "prescaler" lifts each scalar real to its hidden size
        # NOTE(review): iterates hparams.x_reals (the continuous variables of the input
        # tensor) -- confirm every real consumed by the variable selection networks
        # is contained in x_reals, otherwise a prescaler will be missing at runtime
        self.prescalers = nn.ModuleDict(
            {
                name: nn.Linear(
                    1,
                    self.hparams.hidden_continuous_sizes.get(
                        name, self.hparams.hidden_continuous_size
                    ),
                )
                for name in self.hparams.x_reals
            }
        )
        # variable selection
        # variable selection for static variables
        static_input_sizes = {
            name: self.hparams.embedding_sizes[name][1]
            for name in self.hparams.static_categoricals
        }
        static_input_sizes.update(
            {
                name: self.hparams.hidden_continuous_sizes.get(
                    name, self.hparams.hidden_continuous_size
                )
                for name in self.hparams.static_reals
            }
        )
        self.static_variable_selection = VariableSelectionNetwork(
            input_sizes=static_input_sizes,
            hidden_size=self.hparams.hidden_size,
            input_embedding_flags={name: True for name in self.hparams.static_categoricals},
            dropout=self.hparams.dropout,
            prescalers=self.prescalers,
        )
        # variable selection for encoder and decoder
        encoder_input_sizes = {
            name: self.hparams.embedding_sizes[name][1]
            for name in self.hparams.time_varying_categoricals_encoder
        }
        encoder_input_sizes.update(
            {
                name: self.hparams.hidden_continuous_sizes.get(
                    name, self.hparams.hidden_continuous_size
                )
                for name in self.hparams.time_varying_reals_encoder
            }
        )
        decoder_input_sizes = {
            name: self.hparams.embedding_sizes[name][1]
            for name in self.hparams.time_varying_categoricals_decoder
        }
        decoder_input_sizes.update(
            {
                name: self.hparams.hidden_continuous_sizes.get(
                    name, self.hparams.hidden_continuous_size
                )
                for name in self.hparams.time_varying_reals_decoder
            }
        )
        # create single variable grns that are shared across decoder and encoder
        if self.hparams.share_single_variable_networks:
            self.shared_single_variable_grns = nn.ModuleDict()
            for name, input_size in encoder_input_sizes.items():
                self.shared_single_variable_grns[name] = GatedResidualNetwork(
                    input_size,
                    min(input_size, self.hparams.hidden_size),
                    self.hparams.hidden_size,
                    self.hparams.dropout,
                )
            for name, input_size in decoder_input_sizes.items():
                if name not in self.shared_single_variable_grns:
                    self.shared_single_variable_grns[name] = GatedResidualNetwork(
                        input_size,
                        min(input_size, self.hparams.hidden_size),
                        self.hparams.hidden_size,
                        self.hparams.dropout,
                    )
        self.encoder_variable_selection = VariableSelectionNetwork(
            input_sizes=encoder_input_sizes,
            hidden_size=self.hparams.hidden_size,
            input_embedding_flags={
                name: True for name in self.hparams.time_varying_categoricals_encoder
            },
            dropout=self.hparams.dropout,
            context_size=self.hparams.hidden_size,
            prescalers=self.prescalers,
            single_variable_grns={}
            if not self.hparams.share_single_variable_networks
            else self.shared_single_variable_grns,
        )
        self.decoder_variable_selection = VariableSelectionNetwork(
            input_sizes=decoder_input_sizes,
            hidden_size=self.hparams.hidden_size,
            input_embedding_flags={
                name: True for name in self.hparams.time_varying_categoricals_decoder
            },
            dropout=self.hparams.dropout,
            context_size=self.hparams.hidden_size,
            prescalers=self.prescalers,
            single_variable_grns={}
            if not self.hparams.share_single_variable_networks
            else self.shared_single_variable_grns,
        )
        # static encoders
        # for variable selection
        self.static_context_variable_selection = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # for hidden state of the lstm
        self.static_context_initial_hidden_lstm = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # for cell state of the lstm
        self.static_context_initial_cell_lstm = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # for post lstm static enrichment
        self.static_context_enrichment = GatedResidualNetwork(
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            self.hparams.dropout,
        )
        # lstm encoder (history) and decoder (future) for local processing
        self.lstm_encoder = nn.LSTM(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            num_layers=self.hparams.lstm_layers,
            dropout=self.hparams.dropout if self.hparams.lstm_layers > 1 else 0,
            batch_first=True,
        )
        self.lstm_decoder = nn.LSTM(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            num_layers=self.hparams.lstm_layers,
            dropout=self.hparams.dropout if self.hparams.lstm_layers > 1 else 0,
            batch_first=True,
        )
        # skip connection for lstm
        self.post_lstm_gate_encoder = GatedLinearUnit(
            self.hparams.hidden_size, dropout=self.hparams.dropout
        )
        # encoder and decoder intentionally share the gate / add-norm modules
        self.post_lstm_gate_decoder = self.post_lstm_gate_encoder
        # self.post_lstm_gate_decoder = GatedLinearUnit(self.hparams.hidden_size, dropout=self.hparams.dropout)
        self.post_lstm_add_norm_encoder = AddNorm(
            self.hparams.hidden_size, trainable_add=False
        )
        # self.post_lstm_add_norm_decoder = AddNorm(self.hparams.hidden_size, trainable_add=True)
        self.post_lstm_add_norm_decoder = self.post_lstm_add_norm_encoder
        # static enrichment and processing past LSTM
        self.static_enrichment = GatedResidualNetwork(
            input_size=self.hparams.hidden_size,
            hidden_size=self.hparams.hidden_size,
            output_size=self.hparams.hidden_size,
            dropout=self.hparams.dropout,
            context_size=self.hparams.hidden_size,
        )
        # attention for long-range processing
        self.multihead_attn = InterpretableMultiHeadAttention(
            d_model=self.hparams.hidden_size,
            n_head=self.hparams.attention_head_size,
            dropout=self.hparams.dropout,
        )
        self.post_attn_gate_norm = GateAddNorm(
            self.hparams.hidden_size, dropout=self.hparams.dropout, trainable_add=False
        )
        self.pos_wise_ff = GatedResidualNetwork(
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            self.hparams.hidden_size,
            dropout=self.hparams.dropout,
        )
        # output processing -> no dropout at this late stage
        self.pre_output_gate_norm = GateAddNorm(
            self.hparams.hidden_size, dropout=None, trainable_add=False
        )
        self.output_layer = nn.Linear(self.hparams.hidden_size, self.hparams.output_size)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def from_dataset(
cls,
dataset: TimeSeriesDataSet,
allowed_encoder_known_variable_names: List[str] = None,
**kwargs,
):
"""
Create model from dataset.
Args:
dataset: timeseries dataset
allowed_encoder_known_variable_names: List of known variables that are allowed in encoder, defaults to all
**kwargs: additional arguments such as hyperparameters for model (see ``__init__()``)
Returns:
TemporalFusionTransformer
"""
new_kwargs = dict(
max_encoder_length=dataset.max_encoder_length,
)
new_kwargs.update(kwargs)
# create class and return
return super().from_dataset(
dataset,
allowed_encoder_known_variable_names=allowed_encoder_known_variable_names,
**new_kwargs,
)
|
def from_dataset(
cls,
dataset: TimeSeriesDataSet,
allowed_encoder_known_variable_names: List[str] = None,
**kwargs,
):
"""
Create model from dataset.
Args:
dataset: timeseries dataset
allowed_encoder_known_variable_names: List of known variables that are allowed in encoder, defaults to all
**kwargs: additional arguments such as hyperparameters for model (see ``__init__()``)
Returns:
TemporalFusionTransformer
"""
if allowed_encoder_known_variable_names is None:
allowed_encoder_known_variable_names = (
dataset.time_varying_known_categoricals + dataset.time_varying_known_reals
)
# embeddings
embedding_labels = {
name: encoder.classes_
for name, encoder in dataset.categorical_encoders.items()
if name in dataset.categoricals
}
embedding_paddings = dataset.dropout_categoricals
# determine embedding sizes based on heuristic
embedding_sizes = {
name: (len(encoder.classes_), get_embedding_size(len(encoder.classes_)))
for name, encoder in dataset.categorical_encoders.items()
if name in dataset.categoricals
}
embedding_sizes.update(kwargs.get("embedding_sizes", {}))
kwargs.setdefault("embedding_sizes", embedding_sizes)
new_kwargs = dict(
max_encoder_length=dataset.max_encoder_length,
static_categoricals=dataset.static_categoricals,
time_varying_categoricals_encoder=[
name
for name in dataset.time_varying_known_categoricals
if name in allowed_encoder_known_variable_names
]
+ dataset.time_varying_unknown_categoricals,
time_varying_categoricals_decoder=dataset.time_varying_known_categoricals,
static_reals=dataset.static_reals,
time_varying_reals_encoder=[
name
for name in dataset.time_varying_known_reals
if name in allowed_encoder_known_variable_names
]
+ dataset.time_varying_unknown_reals,
time_varying_reals_decoder=dataset.time_varying_known_reals,
x_reals=dataset.reals,
x_categoricals=dataset.flat_categoricals,
embedding_labels=embedding_labels,
embedding_paddings=embedding_paddings,
categorical_groups=dataset.variable_groups,
)
new_kwargs.update(kwargs)
# create class and return
return super().from_dataset(dataset, **new_kwargs)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
input dimensions: n_samples x time x variables
"""
encoder_lengths = x["encoder_lengths"]
decoder_lengths = x["decoder_lengths"]
x_cat = torch.cat(
[x["encoder_cat"], x["decoder_cat"]], dim=1
) # concatenate in time dimension
x_cont = torch.cat(
[x["encoder_cont"], x["decoder_cont"]], dim=1
) # concatenate in time dimension
timesteps = x_cont.size(1) # encode + decode length
max_encoder_length = int(encoder_lengths.max())
input_vectors = self.input_embeddings(x_cat)
input_vectors.update(
{
name: x_cont[..., idx].unsqueeze(-1)
for idx, name in enumerate(self.hparams.x_reals)
if name in self.reals
}
)
# Embedding and variable selection
if len(self.static_variables) > 0:
# static embeddings will be constant over entire batch
static_embedding = {
name: input_vectors[name][:, 0] for name in self.static_variables
}
static_embedding, static_variable_selection = self.static_variable_selection(
static_embedding
)
else:
static_embedding = torch.zeros(
(x_cont.size(0), self.hparams.hidden_size),
dtype=self.dtype,
device=self.device,
)
static_variable_selection = torch.zeros(
(x_cont.size(0), 0), dtype=self.dtype, device=self.device
)
static_context_variable_selection = self.expand_static_context(
self.static_context_variable_selection(static_embedding), timesteps
)
embeddings_varying_encoder = {
name: input_vectors[name][:, :max_encoder_length]
for name in self.encoder_variables
}
embeddings_varying_encoder, encoder_sparse_weights = (
self.encoder_variable_selection(
embeddings_varying_encoder,
static_context_variable_selection[:, :max_encoder_length],
)
)
embeddings_varying_decoder = {
name: input_vectors[name][:, max_encoder_length:]
for name in self.decoder_variables # select decoder
}
embeddings_varying_decoder, decoder_sparse_weights = (
self.decoder_variable_selection(
embeddings_varying_decoder,
static_context_variable_selection[:, max_encoder_length:],
)
)
# LSTM
# run lstm at least once, i.e. encode length has to be > 0
lstm_encoder_lengths = encoder_lengths.where(
encoder_lengths > 0, torch.ones_like(encoder_lengths)
)
# calculate initial state
input_hidden = self.static_context_initial_hidden_lstm(static_embedding).expand(
self.hparams.lstm_layers, -1, -1
)
input_cell = self.static_context_initial_cell_lstm(static_embedding).expand(
self.hparams.lstm_layers, -1, -1
)
# # run local encoder
encoder_output, (hidden, cell) = self.lstm_encoder(
rnn.pack_padded_sequence(
embeddings_varying_encoder,
lstm_encoder_lengths.cpu(),
enforce_sorted=False,
batch_first=True,
),
(input_hidden, input_cell),
)
encoder_output, _ = rnn.pad_packed_sequence(encoder_output, batch_first=True)
# replace hidden cell with initial input if encoder_length is zero to determine correct initial state
no_encoding = (encoder_lengths == 0)[
None, :, None
] # shape: n_lstm_layers x batch_size x hidden_size
hidden = hidden.masked_scatter(no_encoding, input_hidden)
cell = cell.masked_scatter(no_encoding, input_cell)
# run local decoder
decoder_output, _ = self.lstm_decoder(
rnn.pack_padded_sequence(
embeddings_varying_decoder,
decoder_lengths.cpu(),
enforce_sorted=False,
batch_first=True,
),
(hidden, cell),
)
decoder_output, _ = rnn.pad_packed_sequence(decoder_output, batch_first=True)
# skip connection over lstm
lstm_output_encoder = self.post_lstm_gate_encoder(encoder_output)
lstm_output_encoder = self.post_lstm_add_norm_encoder(
lstm_output_encoder, embeddings_varying_encoder
)
lstm_output_decoder = self.post_lstm_gate_decoder(decoder_output)
lstm_output_decoder = self.post_lstm_add_norm_decoder(
lstm_output_decoder, embeddings_varying_decoder
)
lstm_output = torch.cat([lstm_output_encoder, lstm_output_decoder], dim=1)
# static enrichment
static_context_enrichment = self.static_context_enrichment(static_embedding)
attn_input = self.static_enrichment(
lstm_output, self.expand_static_context(static_context_enrichment, timesteps)
)
# Attention
attn_output, attn_output_weights = self.multihead_attn(
q=attn_input[:, max_encoder_length:], # query only for predictions
k=attn_input,
v=attn_input,
mask=self.get_attention_mask(
encoder_lengths=encoder_lengths,
decoder_length=timesteps - max_encoder_length,
),
)
# skip connection over attention
attn_output = self.post_attn_gate_norm(
attn_output, attn_input[:, max_encoder_length:]
)
output = self.pos_wise_ff(attn_output)
# skip connection over temporal fusion decoder (not LSTM decoder despite the LSTM output contains
# a skip from the variable selection network)
output = self.pre_output_gate_norm(output, lstm_output[:, max_encoder_length:])
output = self.output_layer(output)
return dict(
prediction=output,
attention=attn_output_weights,
static_variables=static_variable_selection,
encoder_variables=encoder_sparse_weights,
decoder_variables=decoder_sparse_weights,
decoder_lengths=decoder_lengths,
encoder_lengths=encoder_lengths,
groups=x["groups"],
decoder_time_idx=x["decoder_time_idx"],
target_scale=x["target_scale"],
)
|
def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
input dimensions: n_samples x time x variables
"""
encoder_lengths = x["encoder_lengths"]
decoder_lengths = x["decoder_lengths"]
x_cat = torch.cat(
[x["encoder_cat"], x["decoder_cat"]], dim=1
) # concatenate in time dimension
x_cont = torch.cat(
[x["encoder_cont"], x["decoder_cont"]], dim=1
) # concatenate in time dimension
timesteps = x_cont.size(1) # encode + decode length
max_encoder_length = int(encoder_lengths.max())
input_vectors = self.input_embeddings(x_cat)
input_vectors.update(
{
name: x_cont[..., idx].unsqueeze(-1)
for idx, name in enumerate(self.hparams.x_reals)
}
)
# Embedding and variable selection
if len(self.hparams.static_categoricals + self.hparams.static_reals) > 0:
# static embeddings will be constant over entire batch
static_embedding = {
name: input_vectors[name][:, 0]
for name in self.hparams.static_categoricals + self.hparams.static_reals
}
static_embedding, static_variable_selection = self.static_variable_selection(
static_embedding
)
else:
static_embedding = torch.zeros(
(x_cont.size(0), self.hparams.hidden_size),
dtype=self.dtype,
device=self.device,
)
static_variable_selection = torch.zeros(
(x_cont.size(0), 0), dtype=self.dtype, device=self.device
)
static_context_variable_selection = self.expand_static_context(
self.static_context_variable_selection(static_embedding), timesteps
)
embeddings_varying_encoder = {
name: input_vectors[name][:, :max_encoder_length]
for name in self.hparams.time_varying_categoricals_encoder
+ self.hparams.time_varying_reals_encoder
}
embeddings_varying_encoder, encoder_sparse_weights = (
self.encoder_variable_selection(
embeddings_varying_encoder,
static_context_variable_selection[:, :max_encoder_length],
)
)
embeddings_varying_decoder = {
name: input_vectors[name][:, max_encoder_length:] # select decoder
for name in self.hparams.time_varying_categoricals_decoder
+ self.hparams.time_varying_reals_decoder
}
embeddings_varying_decoder, decoder_sparse_weights = (
self.decoder_variable_selection(
embeddings_varying_decoder,
static_context_variable_selection[:, max_encoder_length:],
)
)
# LSTM
# run lstm at least once, i.e. encode length has to be > 0
lstm_encoder_lengths = encoder_lengths.where(
encoder_lengths > 0, torch.ones_like(encoder_lengths)
)
# calculate initial state
input_hidden = self.static_context_initial_hidden_lstm(static_embedding).expand(
self.hparams.lstm_layers, -1, -1
)
input_cell = self.static_context_initial_cell_lstm(static_embedding).expand(
self.hparams.lstm_layers, -1, -1
)
# # run local encoder
encoder_output, (hidden, cell) = self.lstm_encoder(
rnn.pack_padded_sequence(
embeddings_varying_encoder,
lstm_encoder_lengths,
enforce_sorted=False,
batch_first=True,
),
(input_hidden, input_cell),
)
encoder_output, _ = rnn.pad_packed_sequence(encoder_output, batch_first=True)
# replace hidden cell with initial input if encoder_length is zero to determine correct initial state
no_encoding = (encoder_lengths == 0)[
None, :, None
] # shape: n_lstm_layers x batch_size x hidden_size
hidden = hidden.masked_scatter(no_encoding, input_hidden)
cell = cell.masked_scatter(no_encoding, input_cell)
# run local decoder
decoder_output, _ = self.lstm_decoder(
rnn.pack_padded_sequence(
embeddings_varying_decoder,
decoder_lengths,
enforce_sorted=False,
batch_first=True,
),
(hidden, cell),
)
decoder_output, _ = rnn.pad_packed_sequence(decoder_output, batch_first=True)
# skip connection over lstm
lstm_output_encoder = self.post_lstm_gate_encoder(encoder_output)
lstm_output_encoder = self.post_lstm_add_norm_encoder(
lstm_output_encoder, embeddings_varying_encoder
)
lstm_output_decoder = self.post_lstm_gate_decoder(decoder_output)
lstm_output_decoder = self.post_lstm_add_norm_decoder(
lstm_output_decoder, embeddings_varying_decoder
)
lstm_output = torch.cat([lstm_output_encoder, lstm_output_decoder], dim=1)
# static enrichment
static_context_enrichment = self.static_context_enrichment(static_embedding)
attn_input = self.static_enrichment(
lstm_output, self.expand_static_context(static_context_enrichment, timesteps)
)
# Attention
attn_output, attn_output_weights = self.multihead_attn(
q=attn_input[:, max_encoder_length:], # query only for predictions
k=attn_input,
v=attn_input,
mask=self.get_attention_mask(
encoder_lengths=encoder_lengths,
decoder_length=timesteps - max_encoder_length,
),
)
# skip connection over attention
attn_output = self.post_attn_gate_norm(
attn_output, attn_input[:, max_encoder_length:]
)
output = self.pos_wise_ff(attn_output)
# skip connection over temporal fusion decoder (not LSTM decoder despite the LSTM output contains
# a skip from the variable selection network)
output = self.pre_output_gate_norm(output, lstm_output[:, max_encoder_length:])
output = self.output_layer(output)
return dict(
prediction=output,
attention=attn_output_weights,
static_variables=static_variable_selection,
encoder_variables=encoder_sparse_weights,
decoder_variables=decoder_sparse_weights,
decoder_lengths=decoder_lengths,
encoder_lengths=encoder_lengths,
groups=x["groups"],
decoder_time_idx=x["decoder_time_idx"],
target_scale=x["target_scale"],
)
|
https://github.com/jdb78/pytorch-forecasting/issues/135
|
RuntimeError Traceback (most recent call last)
<ipython-input-11-a92b5627800b> in <module>
5 val_dataloaders=val_dataloader,
6 max_lr=10.0,
----> 7 min_lr=1e-6,
8 )
9
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/tuning.py in lr_find(self, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
128 mode,
129 early_stop_threshold,
--> 130 datamodule,
131 )
132
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/tuner/lr_finder.py in lr_find(trainer, model, train_dataloader, val_dataloaders, min_lr, max_lr, num_training, mode, early_stop_threshold, datamodule)
173 train_dataloader=train_dataloader,
174 val_dataloaders=val_dataloaders,
--> 175 datamodule=datamodule)
176
177 # Prompt if we stopped early
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
437 self.call_hook('on_fit_start')
438
--> 439 results = self.accelerator_backend.train()
440 self.accelerator_backend.teardown()
441
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in train(self)
52
53 # train or test
---> 54 results = self.train_or_test()
55 return results
56
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/accelerator.py in train_or_test(self)
64 results = self.trainer.run_test()
65 else:
---> 66 results = self.trainer.train()
67 return results
68
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in train(self)
459
460 def train(self):
--> 461 self.run_sanity_check(self.get_model())
462
463 # enable train mode
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_sanity_check(self, ref_model)
645
646 # run eval step
--> 647 _, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
648
649 # allow no returns from eval
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/trainer.py in run_evaluation(self, test_mode, max_batches)
565
566 # lightning module methods
--> 567 output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
568 output = self.evaluation_loop.evaluation_step_end(output)
569
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/trainer/evaluation_loop.py in evaluation_step(self, test_mode, batch, batch_idx, dataloader_idx)
169 output = self.trainer.accelerator_backend.test_step(args)
170 else:
--> 171 output = self.trainer.accelerator_backend.validation_step(args)
172
173 # track batch size for weighted average
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in validation_step(self, args)
76 output = self.__validation_step(args)
77 else:
---> 78 output = self.__validation_step(args)
79
80 return output
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py in __validation_step(self, args)
84 batch = self.to_device(batch)
85 args[0] = batch
---> 86 output = self.trainer.model.validation_step(*args)
87 return output
88
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in validation_step(self, batch, batch_idx)
138 def validation_step(self, batch, batch_idx):
139 x, y = batch
--> 140 log, _ = self.step(x, y, batch_idx, label="val") # log loss
141 self.log("val_loss", log["loss"], on_step=False, on_epoch=True, prog_bar=True)
142 return log
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in step(self, x, y, batch_idx, label)
566 """
567 # extract data and run model
--> 568 log, out = super().step(x, y, batch_idx, label=label)
569 # calculate interpretations etc for latter logging
570 if self.log_interval(label == "train") > 0:
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/base_model.py in step(self, x, y, batch_idx, label)
194 loss = loss * (1 + monotinicity_loss)
195 else:
--> 196 out = self(x)
197 out["prediction"] = self.transform_output(out)
198
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
725 result = self._slow_forward(*input, **kwargs)
726 else:
--> 727 result = self.forward(*input, **kwargs)
728 for hook in itertools.chain(
729 _global_forward_hooks.values(),
~/repo/emart-promo/env/lib/python3.7/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in forward(self, x)
489 encoder_output, (hidden, cell) = self.lstm_encoder(
490 rnn.pack_padded_sequence(
--> 491 embeddings_varying_encoder, lstm_encoder_lengths, enforce_sorted=False, batch_first=True
492 ),
493 (input_hidden, input_cell),
~/repo/emart-promo/env/lib/python3.7/site-packages/torch/nn/utils/rnn.py in pack_padded_sequence(input, lengths, batch_first, enforce_sorted)
242
243 data, batch_sizes = \
--> 244 _VF._pack_padded_sequence(input, lengths, batch_first)
245 return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
246
RuntimeError: 'lengths' argument should be a 1D CPU int64 tensor, but got 1D cuda:0 Long tensor
|
RuntimeError
|
def _log_interpretation(self, outputs, label="train"):
"""
Log interpretation metrics to tensorboard.
"""
# extract interpretations
interpretation = {
# use padded_stack because decoder length histogram can be of different length
name: padded_stack(
[x["interpretation"][name] for x in outputs], side="right", value=0
).sum(0)
for name in outputs[0]["interpretation"].keys()
}
# normalize attention with length histogram squared to account for: 1. zeros in attention and
# 2. higher attention due to less values
attention_occurances = (
interpretation["encoder_length_histogram"][1:].flip(0).cumsum(0).float()
)
attention_occurances = attention_occurances / attention_occurances.max()
attention_occurances = torch.cat(
[
attention_occurances,
torch.ones(
interpretation["attention"].size(0) - attention_occurances.size(0),
dtype=attention_occurances.dtype,
device=attention_occurances.device,
),
],
dim=0,
)
interpretation["attention"] = interpretation[
"attention"
] / attention_occurances.pow(2).clamp(1.0)
interpretation["attention"] = (
interpretation["attention"] / interpretation["attention"].sum()
)
figs = self.plot_interpretation(interpretation) # make interpretation figures
# log to tensorboard
for name, fig in figs.items():
self.logger.experiment.add_figure(
f"{label.capitalize()} {name} importance", fig, global_step=self.global_step
)
# log lengths of encoder/decoder
for type in ["encoder", "decoder"]:
fig, ax = plt.subplots()
lengths = (
torch.stack(
[out["interpretation"][f"{type}_length_histogram"] for out in outputs]
)
.sum(0)
.cpu()
)
if type == "decoder":
start = 1
else:
start = 0
ax.plot(torch.arange(start, start + len(lengths)), lengths)
ax.set_xlabel(f"{type.capitalize()} length")
ax.set_ylabel("Number of samples")
ax.set_title(f"{type.capitalize()} length distribution in {label} epoch")
self.logger.experiment.add_figure(
f"{label.capitalize()} {type} length distribution",
fig,
global_step=self.global_step,
)
|
def _log_interpretation(self, outputs, label="train"):
"""
Log interpretation metrics to tensorboard.
"""
# extract interpretations
interpretation = {
name: torch.stack([x["interpretation"][name] for x in outputs]).sum(0)
for name in outputs[0]["interpretation"].keys()
}
# normalize attention with length histogram squared to account for: 1. zeros in attention and
# 2. higher attention due to less values
attention_occurances = (
interpretation["encoder_length_histogram"][1:].flip(0).cumsum(0).float()
)
attention_occurances = attention_occurances / attention_occurances.max()
attention_occurances = torch.cat(
[
attention_occurances,
torch.ones(
interpretation["attention"].size(0) - attention_occurances.size(0),
dtype=attention_occurances.dtype,
device=attention_occurances.device,
),
],
dim=0,
)
interpretation["attention"] = interpretation[
"attention"
] / attention_occurances.pow(2).clamp(1.0)
interpretation["attention"] = (
interpretation["attention"] / interpretation["attention"].sum()
)
figs = self.plot_interpretation(interpretation) # make interpretation figures
# log to tensorboard
for name, fig in figs.items():
self.logger.experiment.add_figure(
f"{label.capitalize()} {name} importance", fig, global_step=self.global_step
)
# log lengths of encoder/decoder
for type in ["encoder", "decoder"]:
fig, ax = plt.subplots()
lengths = (
torch.stack(
[out["interpretation"][f"{type}_length_histogram"] for out in outputs]
)
.sum(0)
.cpu()
)
if type == "decoder":
start = 1
else:
start = 0
ax.plot(torch.arange(start, start + len(lengths)), lengths)
ax.set_xlabel(f"{type.capitalize()} length")
ax.set_ylabel("Number of samples")
ax.set_title(f"{type.capitalize()} length distribution in {label} epoch")
self.logger.experiment.add_figure(
f"{label.capitalize()} {type} length distribution",
fig,
global_step=self.global_step,
)
|
https://github.com/jdb78/pytorch-forecasting/issues/85
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-32-14fda4f79b4a> in <module>
1 # Train model
----> 2 trainer.fit(
3 tft,
4 train_dataloader = train_dataloader,
5 val_dataloaders = val_dataloader
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/trainer/states.py in wrapped_fn(self, *args, **kwargs)
46 if entering is not None:
47 self.state = entering
---> 48 result = fn(self, *args, **kwargs)
49
50 # The INTERRUPTED state can be set inside the run function. To indicate that run was interrupted
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
1071 self.accelerator_backend = GPUBackend(self)
1072 model = self.accelerator_backend.setup(model)
-> 1073 results = self.accelerator_backend.train(model)
1074
1075 elif self.use_tpu:
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/accelerators/gpu_backend.py in train(self, model)
49
50 def train(self, model):
---> 51 results = self.trainer.run_pretrain_routine(model)
52 return results
53
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py in run_pretrain_routine(self, model)
1237
1238 # CORE TRAINING LOOP
-> 1239 self.train()
1240
1241 def _run_sanity_check(self, ref_model, model):
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in train(self)
392 # RUN TNG EPOCH
393 # -----------------
--> 394 self.run_training_epoch()
395
396 if self.max_steps and self.max_steps <= self.global_step:
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch(self)
548
549 # process epoch outputs
--> 550 self.run_training_epoch_end(epoch_output, checkpoint_accumulator, early_stopping_accumulator, num_optimizers)
551
552 # checkpoint callback
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch_end(self, epoch_output, checkpoint_accumulator, early_stopping_accumulator, num_optimizers)
662 # run training_epoch_end
663 # a list with a result per optimizer index
--> 664 epoch_output = model.training_epoch_end(epoch_output)
665
666 if isinstance(epoch_output, Result):
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_forecasting/models/base_model.py in training_epoch_end(self, outputs)
133
134 def training_epoch_end(self, outputs):
--> 135 log, _ = self.epoch_end(outputs, label="train")
136 return log
137
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in epoch_end(self, outputs, label)
613 log, out = super().epoch_end(outputs, label=label)
614 if self.log_interval(label == "train") > 0:
--> 615 self._log_interpretation(out, label=label)
616 return log, out
617
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in _log_interpretation(self, outputs, label)
820 """
821 # extract interpretations
--> 822 interpretation = {
823 name: torch.stack([x["interpretation"][name] for x in outputs]).sum(0)
824 for name in outputs[0]["interpretation"].keys()
~/anaconda3/envs/forecasting/lib/python3.8/site-packages/pytorch_forecasting/models/temporal_fusion_transformer/__init__.py in <dictcomp>(.0)
821 # extract interpretations
822 interpretation = {
--> 823 name: torch.stack([x["interpretation"][name] for x in outputs]).sum(0)
824 for name in outputs[0]["interpretation"].keys()
825 }
RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0. Got 6 and 7 in dimension 1 at /tmp/pip-req-build-8yht7tdu/aten/src/THC/generic/THCTensorMath.cu:71
|
RuntimeError
|
def predict_proba(self, X, batch_size=None, n_jobs=1):
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
|
def predict_proba(self, X, batch_size=None, n_jobs=1):
return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)
|
https://github.com/automl/auto-sklearn/issues/409
|
AttributeError Traceback (most recent call last)
<ipython-input-257-a782d97ebce8> in <module>()
----> 1 pipe.predict_proba(clf_x[te_idx])
~/repos/vcf/research/env/lib/python3.5/site-packages/autosklearn/estimators.py in predict_proba(self, X, batch_size, n_jobs)
430 """
431 return self._automl.predict_proba(
--> 432 X, batch_size=batch_size, n_jobs=n_jobs)
433
434
~/repos/vcf/research/env/lib/python3.5/site-packages/autosklearn/automl.py in predict_proba(self, X, batch_size, n_jobs)
944
945 def predict_proba(self, X, batch_size=None, n_jobs=1):
--> 946 return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)
947
948
AttributeError: 'AutoMLClassifier' object has no attribute '_automl'
|
AttributeError
|
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""Predict probabilities of classes for all samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Returns
-------
y : array of shape = [n_samples, n_classes] or [n_samples, n_labels]
The predicted class probabilities.
"""
return super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
|
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""Predict probabilities of classes for all samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Returns
-------
y : array of shape = [n_samples, n_classes] or [n_samples, n_labels]
The predicted class probabilities.
"""
return self._automl.predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
|
https://github.com/automl/auto-sklearn/issues/409
|
AttributeError Traceback (most recent call last)
<ipython-input-257-a782d97ebce8> in <module>()
----> 1 pipe.predict_proba(clf_x[te_idx])
~/repos/vcf/research/env/lib/python3.5/site-packages/autosklearn/estimators.py in predict_proba(self, X, batch_size, n_jobs)
430 """
431 return self._automl.predict_proba(
--> 432 X, batch_size=batch_size, n_jobs=n_jobs)
433
434
~/repos/vcf/research/env/lib/python3.5/site-packages/autosklearn/automl.py in predict_proba(self, X, batch_size, n_jobs)
944
945 def predict_proba(self, X, batch_size=None, n_jobs=1):
--> 946 return self._automl.predict(X, batch_size=batch_size, n_jobs=n_jobs)
947
948
AttributeError: 'AutoMLClassifier' object has no attribute '_automl'
|
AttributeError
|
def main(self):
watch = StopWatch()
watch.start_task("ensemble_builder")
used_time = 0
time_iter = 0
index_run = 0
num_iteration = 0
current_num_models = 0
last_hash = None
current_hash = None
backend = Backend(self.output_dir, self.autosklearn_tmp_dir)
dir_ensemble = os.path.join(
self.autosklearn_tmp_dir, ".auto-sklearn", "predictions_ensemble"
)
dir_valid = os.path.join(
self.autosklearn_tmp_dir, ".auto-sklearn", "predictions_valid"
)
dir_test = os.path.join(
self.autosklearn_tmp_dir, ".auto-sklearn", "predictions_test"
)
paths_ = [dir_ensemble, dir_valid, dir_test]
dir_ensemble_list_mtimes = []
self.logger.debug(
"Starting main loop with %f seconds and %d iterations "
"left." % (self.limit - used_time, num_iteration)
)
while used_time < self.limit or (
self.max_iterations > 0 and self.max_iterations >= num_iteration
):
num_iteration += 1
self.logger.debug("Time left: %f", self.limit - used_time)
self.logger.debug("Time last ensemble building: %f", time_iter)
# Reload the ensemble targets every iteration, important, because cv may
# update the ensemble targets in the cause of running auto-sklearn
# TODO update cv in order to not need this any more!
targets_ensemble = backend.load_targets_ensemble()
# Load the predictions from the models
exists = [os.path.isdir(dir_) for dir_ in paths_]
if not exists[0]: # all(exists):
self.logger.debug("Prediction directory %s does not exist!" % dir_ensemble)
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
if self.shared_mode is False:
dir_ensemble_list = sorted(
glob.glob(
os.path.join(
dir_ensemble, "predictions_ensemble_%s_*.npy" % self.seed
)
)
)
if exists[1]:
dir_valid_list = sorted(
glob.glob(
os.path.join(
dir_valid, "predictions_valid_%s_*.npy" % self.seed
)
)
)
else:
dir_valid_list = []
if exists[2]:
dir_test_list = sorted(
glob.glob(
os.path.join(dir_test, "predictions_test_%s_*.npy" % self.seed)
)
)
else:
dir_test_list = []
else:
dir_ensemble_list = sorted(os.listdir(dir_ensemble))
dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []
# Check the modification times because predictions can be updated
# over time!
old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
dir_ensemble_list_mtimes = []
# The ensemble dir can contain non-model files. We filter them and
# use the following list instead
dir_ensemble_model_files = []
for dir_ensemble_file in dir_ensemble_list:
if dir_ensemble_file.endswith("/"):
dir_ensemble_file = dir_ensemble_file[:-1]
if not dir_ensemble_file.endswith(".npy"):
self.logger.warning(
"Error loading file (not .npy): %s", dir_ensemble_file
)
continue
dir_ensemble_model_files.append(dir_ensemble_file)
basename = os.path.basename(dir_ensemble_file)
dir_ensemble_file = os.path.join(dir_ensemble, basename)
mtime = os.path.getmtime(dir_ensemble_file)
dir_ensemble_list_mtimes.append(mtime)
if len(dir_ensemble_model_files) == 0:
self.logger.debug("Directories are empty")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
if (
len(dir_ensemble_model_files) <= current_num_models
and old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes
):
self.logger.debug("Nothing has changed since the last time")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
watch.start_task("index_run" + str(index_run))
watch.start_task("ensemble_iter_" + str(num_iteration))
# List of num_runs (which are in the filename) which will be included
# later
include_num_runs = []
backup_num_runs = []
model_and_automl_re = re.compile(r"_([0-9]*)_([0-9]*)\.npy$")
if self.ensemble_nbest is not None:
# Keeps track of the single scores of each model in our ensemble
scores_nbest = []
# The indices of the model that are currently in our ensemble
indices_nbest = []
# The names of the models
model_names = []
model_names_to_scores = dict()
model_idx = 0
for model_name in dir_ensemble_model_files:
if model_name.endswith("/"):
model_name = model_name[:-1]
basename = os.path.basename(model_name)
try:
if self.precision is "16":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float16
)
elif self.precision is "32":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float32
)
elif self.precision is "64":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float64
)
else:
predictions = np.load(os.path.join(dir_ensemble, basename))
score = calculate_score(
targets_ensemble,
predictions,
self.task_type,
self.metric,
predictions.shape[1],
)
except Exception as e:
self.logger.warning("Error loading %s: %s", basename, e)
score = -1
model_names_to_scores[model_name] = score
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if self.ensemble_nbest is not None:
if score <= 0.001:
self.logger.error(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
# If we have less models in our ensemble than ensemble_nbest add
# the current model if it is better than random
elif len(scores_nbest) < self.ensemble_nbest:
scores_nbest.append(score)
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
model_names.append(model_name)
else:
# Take the worst performing model in our ensemble so far
idx = np.argmin(np.array([scores_nbest]))
# If the current model is better than the worst model in
# our ensemble replace it by the current model
if scores_nbest[idx] < score:
self.logger.debug(
"Worst model in our ensemble: %s with "
"score %f will be replaced by model %s "
"with score %f",
model_names[idx],
scores_nbest[idx],
model_name,
score,
)
# Exclude the old model
del scores_nbest[idx]
scores_nbest.append(score)
del include_num_runs[idx]
del indices_nbest[idx]
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
del model_names[idx]
model_names.append(model_name)
# Otherwise exclude the current model from the ensemble
else:
# include_num_runs.append(True)
pass
else:
# Load all predictions that are better than random
if score <= 0.001:
# include_num_runs.append(True)
self.logger.error(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
else:
include_num_runs.append((automl_seed, num_run))
model_idx += 1
# If there is no model better than random guessing, we have to use
# all models which do random guessing
if len(include_num_runs) == 0:
include_num_runs = backup_num_runs
indices_to_model_names = dict()
indices_to_run_num = dict()
for i, model_name in enumerate(dir_ensemble_model_files):
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if (automl_seed, num_run) in include_num_runs:
num_indices = len(indices_to_model_names)
indices_to_model_names[num_indices] = model_name
indices_to_run_num[num_indices] = (automl_seed, num_run)
try:
all_predictions_train, all_predictions_valid, all_predictions_test = (
self.get_all_predictions(
dir_ensemble,
dir_ensemble_model_files,
dir_valid,
dir_valid_list,
dir_test,
dir_test_list,
include_num_runs,
model_and_automl_re,
self.precision,
)
)
except IOError:
self.logger.error("Could not load the predictions.")
continue
if len(include_num_runs) == 0:
self.logger.error("All models do just random guessing")
time.sleep(2)
continue
else:
ensemble = EnsembleSelection(
ensemble_size=self.ensemble_size,
task_type=self.task_type,
metric=self.metric,
)
try:
ensemble.fit(all_predictions_train, targets_ensemble, include_num_runs)
self.logger.info(ensemble)
except ValueError as e:
self.logger.error("Caught ValueError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except IndexError as e:
self.logger.error("Caught IndexError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except Exception as e:
self.logger.error("Caught error! %s", str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
# Output the score
self.logger.info("Training performance: %f" % ensemble.train_score_)
self.logger.info(
"Building the ensemble took %f seconds"
% watch.wall_elapsed("ensemble_iter_" + str(num_iteration))
)
# Set this variable here to avoid re-running the ensemble builder
# every two seconds in case the ensemble did not change
current_num_models = len(dir_ensemble_model_files)
ensemble_predictions = ensemble.predict(all_predictions_train)
if sys.version_info[0] == 2:
ensemble_predictions.flags.writeable = False
current_hash = hash(ensemble_predictions.data)
else:
current_hash = hash(ensemble_predictions.data.tobytes())
# Only output a new ensemble and new predictions if the output of the
# ensemble would actually change!
# TODO this is neither safe (collisions, tests only with the ensemble
# prediction, but not the ensemble), implement a hash function for
# each possible ensemble builder.
if last_hash is not None:
if current_hash == last_hash:
self.logger.info("Ensemble output did not change.")
time.sleep(2)
continue
else:
last_hash = current_hash
else:
last_hash = current_hash
# Save the ensemble for later use in the main auto-sklearn module!
backend.save_ensemble(ensemble, index_run, self.seed)
# Save predictions for valid and test data set
if len(dir_valid_list) == len(dir_ensemble_model_files):
all_predictions_valid = np.array(all_predictions_valid)
ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_valid.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_valid.shape[1] == 1
):
bin_array[ensemble_predictions_valid >= 0.5] = 1
else:
sample_num = ensemble_predictions_valid.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_valid[i, :])
bin_array[i, j] = 1
ensemble_predictions_valid = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_valid.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_valid.size > 1000000:
precision = 4
else:
# File size maximally 2.1MB
precision = 6
backend.save_predictions_as_txt(
ensemble_predictions_valid,
"valid",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many validation set predictions (%d)"
"as ensemble predictions (%d)!.",
len(dir_valid_list),
len(dir_ensemble_model_files),
)
del all_predictions_valid
if len(dir_test_list) == len(dir_ensemble_model_files):
all_predictions_test = np.array(all_predictions_test)
ensemble_predictions_test = ensemble.predict(all_predictions_test)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_test = ensemble_predictions_test[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_test[ensemble_predictions_test < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_test.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_test.shape[1] == 1
):
bin_array[ensemble_predictions_test >= 0.5] = 1
else:
sample_num = ensemble_predictions_test.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_test[i, :])
bin_array[i, j] = 1
ensemble_predictions_test = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_test.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_test.size > 1000000:
precision = 4
else:
precision = 6
backend.save_predictions_as_txt(
ensemble_predictions_test,
"test",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many test set predictions (%d) as "
"ensemble predictions (%d)!",
len(dir_test_list),
len(dir_ensemble_model_files),
)
del all_predictions_test
current_num_models = len(dir_ensemble_model_files)
watch.stop_task("index_run" + str(index_run))
time_iter = watch.get_wall_dur("index_run" + str(index_run))
used_time = watch.wall_elapsed("ensemble_builder")
index_run += 1
return
|
def main(self):
watch = StopWatch()
watch.start_task("ensemble_builder")
used_time = 0
time_iter = 0
index_run = 0
num_iteration = 0
current_num_models = 0
last_hash = None
current_hash = None
backend = Backend(self.output_dir, self.autosklearn_tmp_dir)
dir_ensemble = os.path.join(
self.autosklearn_tmp_dir, ".auto-sklearn", "predictions_ensemble"
)
dir_valid = os.path.join(
self.autosklearn_tmp_dir, ".auto-sklearn", "predictions_valid"
)
dir_test = os.path.join(
self.autosklearn_tmp_dir, ".auto-sklearn", "predictions_test"
)
paths_ = [dir_ensemble, dir_valid, dir_test]
dir_ensemble_list_mtimes = []
self.logger.debug(
"Starting main loop with %f seconds and %d iterations "
"left." % (self.limit - used_time, num_iteration)
)
while used_time < self.limit or (
self.max_iterations > 0 and self.max_iterations >= num_iteration
):
num_iteration += 1
self.logger.debug("Time left: %f", self.limit - used_time)
self.logger.debug("Time last ensemble building: %f", time_iter)
# Reload the ensemble targets every iteration, important, because cv may
# update the ensemble targets in the cause of running auto-sklearn
# TODO update cv in order to not need this any more!
targets_ensemble = backend.load_targets_ensemble()
# Load the predictions from the models
exists = [os.path.isdir(dir_) for dir_ in paths_]
if not exists[0]: # all(exists):
self.logger.debug("Prediction directory %s does not exist!" % dir_ensemble)
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
if self.shared_mode is False:
dir_ensemble_list = sorted(
glob.glob(
os.path.join(
dir_ensemble, "predictions_ensemble_%s_*.npy" % self.seed
)
)
)
if exists[1]:
dir_valid_list = sorted(
glob.glob(
os.path.join(
dir_valid, "predictions_valid_%s_*.npy" % self.seed
)
)
)
else:
dir_valid_list = []
if exists[2]:
dir_test_list = sorted(
glob.glob(
os.path.join(dir_test, "predictions_test_%s_*.npy" % self.seed)
)
)
else:
dir_test_list = []
else:
dir_ensemble_list = sorted(os.listdir(dir_ensemble))
dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []
# Check the modification times because predictions can be updated
# over time!
old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
dir_ensemble_list_mtimes = []
for dir_ensemble_file in dir_ensemble_list:
if dir_ensemble_file.endswith("/"):
dir_ensemble_file = dir_ensemble_file[:-1]
if not dir_ensemble_file.endswith(".npy"):
self.logger.warning(
"Error loading file (not .npy): %s", dir_ensemble_file
)
continue
basename = os.path.basename(dir_ensemble_file)
dir_ensemble_file = os.path.join(dir_ensemble, basename)
mtime = os.path.getmtime(dir_ensemble_file)
dir_ensemble_list_mtimes.append(mtime)
if len(dir_ensemble_list) == 0:
self.logger.debug("Directories are empty")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
if (
len(dir_ensemble_list) <= current_num_models
and old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes
):
self.logger.debug("Nothing has changed since the last time")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
watch.start_task("index_run" + str(index_run))
watch.start_task("ensemble_iter_" + str(num_iteration))
# List of num_runs (which are in the filename) which will be included
# later
include_num_runs = []
backup_num_runs = []
model_and_automl_re = re.compile(r"_([0-9]*)_([0-9]*)\.npy$")
if self.ensemble_nbest is not None:
# Keeps track of the single scores of each model in our ensemble
scores_nbest = []
# The indices of the model that are currently in our ensemble
indices_nbest = []
# The names of the models
model_names = []
model_names_to_scores = dict()
model_idx = 0
for model_name in dir_ensemble_list:
if model_name.endswith("/"):
model_name = model_name[:-1]
basename = os.path.basename(model_name)
try:
if self.precision is "16":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float16
)
elif self.precision is "32":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float32
)
elif self.precision is "64":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float64
)
else:
predictions = np.load(os.path.join(dir_ensemble, basename))
score = calculate_score(
targets_ensemble,
predictions,
self.task_type,
self.metric,
predictions.shape[1],
)
except Exception as e:
self.logger.warning("Error loading %s: %s", basename, e)
score = -1
model_names_to_scores[model_name] = score
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if self.ensemble_nbest is not None:
if score <= 0.001:
self.logger.error(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
# If we have less models in our ensemble than ensemble_nbest add
# the current model if it is better than random
elif len(scores_nbest) < self.ensemble_nbest:
scores_nbest.append(score)
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
model_names.append(model_name)
else:
# Take the worst performing model in our ensemble so far
idx = np.argmin(np.array([scores_nbest]))
# If the current model is better than the worst model in
# our ensemble replace it by the current model
if scores_nbest[idx] < score:
self.logger.debug(
"Worst model in our ensemble: %s with "
"score %f will be replaced by model %s "
"with score %f",
model_names[idx],
scores_nbest[idx],
model_name,
score,
)
# Exclude the old model
del scores_nbest[idx]
scores_nbest.append(score)
del include_num_runs[idx]
del indices_nbest[idx]
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
del model_names[idx]
model_names.append(model_name)
# Otherwise exclude the current model from the ensemble
else:
# include_num_runs.append(True)
pass
else:
# Load all predictions that are better than random
if score <= 0.001:
# include_num_runs.append(True)
self.logger.error(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
else:
include_num_runs.append((automl_seed, num_run))
model_idx += 1
# If there is no model better than random guessing, we have to use
# all models which do random guessing
if len(include_num_runs) == 0:
include_num_runs = backup_num_runs
indices_to_model_names = dict()
indices_to_run_num = dict()
for i, model_name in enumerate(dir_ensemble_list):
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if (automl_seed, num_run) in include_num_runs:
num_indices = len(indices_to_model_names)
indices_to_model_names[num_indices] = model_name
indices_to_run_num[num_indices] = (automl_seed, num_run)
try:
all_predictions_train, all_predictions_valid, all_predictions_test = (
self.get_all_predictions(
dir_ensemble,
dir_ensemble_list,
dir_valid,
dir_valid_list,
dir_test,
dir_test_list,
include_num_runs,
model_and_automl_re,
self.precision,
)
)
except IOError:
self.logger.error("Could not load the predictions.")
continue
if len(include_num_runs) == 0:
self.logger.error("All models do just random guessing")
time.sleep(2)
continue
else:
ensemble = EnsembleSelection(
ensemble_size=self.ensemble_size,
task_type=self.task_type,
metric=self.metric,
)
try:
ensemble.fit(all_predictions_train, targets_ensemble, include_num_runs)
self.logger.info(ensemble)
except ValueError as e:
self.logger.error("Caught ValueError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except IndexError as e:
self.logger.error("Caught IndexError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except Exception as e:
self.logger.error("Caught error! %s", str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
# Output the score
self.logger.info("Training performance: %f" % ensemble.train_score_)
self.logger.info(
"Building the ensemble took %f seconds"
% watch.wall_elapsed("ensemble_iter_" + str(num_iteration))
)
# Set this variable here to avoid re-running the ensemble builder
# every two seconds in case the ensemble did not change
current_num_models = len(dir_ensemble_list)
ensemble_predictions = ensemble.predict(all_predictions_train)
if sys.version_info[0] == 2:
ensemble_predictions.flags.writeable = False
current_hash = hash(ensemble_predictions.data)
else:
current_hash = hash(ensemble_predictions.data.tobytes())
# Only output a new ensemble and new predictions if the output of the
# ensemble would actually change!
# TODO this is neither safe (collisions, tests only with the ensemble
# prediction, but not the ensemble), implement a hash function for
# each possible ensemble builder.
if last_hash is not None:
if current_hash == last_hash:
self.logger.info("Ensemble output did not change.")
time.sleep(2)
continue
else:
last_hash = current_hash
else:
last_hash = current_hash
# Save the ensemble for later use in the main auto-sklearn module!
backend.save_ensemble(ensemble, index_run, self.seed)
# Save predictions for valid and test data set
if len(dir_valid_list) == len(dir_ensemble_list):
all_predictions_valid = np.array(all_predictions_valid)
ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_valid.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_valid.shape[1] == 1
):
bin_array[ensemble_predictions_valid >= 0.5] = 1
else:
sample_num = ensemble_predictions_valid.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_valid[i, :])
bin_array[i, j] = 1
ensemble_predictions_valid = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_valid.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_valid.size > 1000000:
precision = 4
else:
# File size maximally 2.1MB
precision = 6
backend.save_predictions_as_txt(
ensemble_predictions_valid,
"valid",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many validation set predictions (%d)"
"as ensemble predictions (%d)!.",
len(dir_valid_list),
len(dir_ensemble_list),
)
del all_predictions_valid
if len(dir_test_list) == len(dir_ensemble_list):
all_predictions_test = np.array(all_predictions_test)
ensemble_predictions_test = ensemble.predict(all_predictions_test)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_test = ensemble_predictions_test[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_test[ensemble_predictions_test < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_test.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_test.shape[1] == 1
):
bin_array[ensemble_predictions_test >= 0.5] = 1
else:
sample_num = ensemble_predictions_test.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_test[i, :])
bin_array[i, j] = 1
ensemble_predictions_test = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_test.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_test.size > 1000000:
precision = 4
else:
precision = 6
backend.save_predictions_as_txt(
ensemble_predictions_test,
"test",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many test set predictions (%d) as "
"ensemble predictions (%d)!",
len(dir_test_list),
len(dir_ensemble_list),
)
del all_predictions_test
current_num_models = len(dir_ensemble_list)
watch.stop_task("index_run" + str(index_run))
time_iter = watch.get_wall_dur("index_run" + str(index_run))
used_time = watch.wall_elapsed("ensemble_builder")
index_run += 1
return
|
https://github.com/automl/auto-sklearn/issues/69
|
Traceback (most recent call last):
File "truffles-autosklearn-multy.py", line 82, in spawn_classifier
c.fit(X_train, y_train, metric='f1_metric')
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/estimators.py", line 271, in fit
feat_type, dataset_name)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/automl.py", line 262, in fit
return self._fit(loaded_data_manager)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/automl.py", line 526, in _fit
self._load_models()
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/automl.py", line 647, in _load_models
self.models_ = self._backend.load_all_models(seed)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/util/backend.py", line 171, in load_all_models
models = self.load_models_by_file_names(model_files)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/util/backend.py", line 185, in load_models_by_file_names
seed = int(basename_parts[0])
ValueError: invalid literal for int() with base 10: 'tmpJmPf2D'
|
ValueError
|
def main(self):
watch = StopWatch()
watch.start_task("ensemble_builder")
used_time = 0
time_iter = 0
index_run = 0
num_iteration = 0
current_num_models = 0
last_hash = None
current_hash = None
dir_ensemble = os.path.join(
self.backend.temporary_directory, ".auto-sklearn", "predictions_ensemble"
)
dir_valid = os.path.join(
self.backend.temporary_directory, ".auto-sklearn", "predictions_valid"
)
dir_test = os.path.join(
self.backend.temporary_directory, ".auto-sklearn", "predictions_test"
)
paths_ = [dir_ensemble, dir_valid, dir_test]
dir_ensemble_list_mtimes = []
self.logger.debug(
"Starting main loop with %f seconds and %d iterations "
"left." % (self.limit - used_time, num_iteration)
)
while used_time < self.limit or (
self.max_iterations > 0 and self.max_iterations >= num_iteration
):
num_iteration += 1
self.logger.debug("Time left: %f", self.limit - used_time)
self.logger.debug("Time last ensemble building: %f", time_iter)
# Reload the ensemble targets every iteration, important, because cv may
# update the ensemble targets in the cause of running auto-sklearn
# TODO update cv in order to not need this any more!
targets_ensemble = self.backend.load_targets_ensemble()
# Load the predictions from the models
exists = [os.path.isdir(dir_) for dir_ in paths_]
if not exists[0]: # all(exists):
self.logger.debug("Prediction directory %s does not exist!" % dir_ensemble)
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
if self.shared_mode is False:
dir_ensemble_list = sorted(
glob.glob(
os.path.join(
dir_ensemble, "predictions_ensemble_%s_*.npy" % self.seed
)
)
)
if exists[1]:
dir_valid_list = sorted(
glob.glob(
os.path.join(
dir_valid, "predictions_valid_%s_*.npy" % self.seed
)
)
)
else:
dir_valid_list = []
if exists[2]:
dir_test_list = sorted(
glob.glob(
os.path.join(dir_test, "predictions_test_%s_*.npy" % self.seed)
)
)
else:
dir_test_list = []
else:
dir_ensemble_list = sorted(os.listdir(dir_ensemble))
dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []
# Check the modification times because predictions can be updated
# over time!
old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
dir_ensemble_list_mtimes = []
# The ensemble dir can contain non-model files. We filter them and
# use the following list instead
dir_ensemble_model_files = []
for dir_ensemble_file in dir_ensemble_list:
if dir_ensemble_file.endswith("/"):
dir_ensemble_file = dir_ensemble_file[:-1]
if not dir_ensemble_file.endswith(".npy"):
self.logger.warning(
"Error loading file (not .npy): %s", dir_ensemble_file
)
continue
dir_ensemble_model_files.append(dir_ensemble_file)
basename = os.path.basename(dir_ensemble_file)
dir_ensemble_file = os.path.join(dir_ensemble, basename)
mtime = os.path.getmtime(dir_ensemble_file)
dir_ensemble_list_mtimes.append(mtime)
if len(dir_ensemble_model_files) == 0:
self.logger.debug("Directories are empty")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
if (
len(dir_ensemble_model_files) <= current_num_models
and old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes
):
self.logger.debug("Nothing has changed since the last time")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# TODO restructure time management in the ensemble builder,
# what is the time of index_run actually needed for?
watch.start_task("index_run" + str(index_run))
watch.start_task("ensemble_iter_" + str(num_iteration))
# List of num_runs (which are in the filename) which will be included
# later
include_num_runs = []
backup_num_runs = []
model_and_automl_re = re.compile(r"_([0-9]*)_([0-9]*)\.npy$")
if self.ensemble_nbest is not None:
# Keeps track of the single scores of each model in our ensemble
scores_nbest = []
# The indices of the model that are currently in our ensemble
indices_nbest = []
# The names of the models
model_names = []
model_names_to_scores = dict()
model_idx = 0
for model_name in dir_ensemble_model_files:
if model_name.endswith("/"):
model_name = model_name[:-1]
basename = os.path.basename(model_name)
try:
if self.precision is "16":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float16
)
elif self.precision is "32":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float32
)
elif self.precision is "64":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float64
)
else:
predictions = np.load(os.path.join(dir_ensemble, basename))
score = calculate_score(
targets_ensemble,
predictions,
self.task_type,
self.metric,
predictions.shape[1],
)
except Exception as e:
self.logger.warning("Error loading %s: %s", basename, e)
score = -1
model_names_to_scores[model_name] = score
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if self.ensemble_nbest is not None:
if score <= 0.001:
self.logger.info(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
# If we have less models in our ensemble than ensemble_nbest add
# the current model if it is better than random
elif len(scores_nbest) < self.ensemble_nbest:
scores_nbest.append(score)
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
model_names.append(model_name)
else:
# Take the worst performing model in our ensemble so far
idx = np.argmin(np.array([scores_nbest]))
# If the current model is better than the worst model in
# our ensemble replace it by the current model
if scores_nbest[idx] < score:
self.logger.info(
"Worst model in our ensemble: %s with score %f "
"will be replaced by model %s with score %f",
model_names[idx],
scores_nbest[idx],
model_name,
score,
)
# Exclude the old model
del scores_nbest[idx]
scores_nbest.append(score)
del include_num_runs[idx]
del indices_nbest[idx]
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
del model_names[idx]
model_names.append(model_name)
# Otherwise exclude the current model from the ensemble
else:
# include_num_runs.append(True)
pass
else:
# Load all predictions that are better than random
if score <= 0.001:
# include_num_runs.append(True)
self.logger.info(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
else:
include_num_runs.append((automl_seed, num_run))
model_idx += 1
# If there is no model better than random guessing, we have to use
# all models which do random guessing
if len(include_num_runs) == 0:
include_num_runs = backup_num_runs
indices_to_model_names = dict()
indices_to_run_num = dict()
for i, model_name in enumerate(dir_ensemble_model_files):
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if (automl_seed, num_run) in include_num_runs:
num_indices = len(indices_to_model_names)
indices_to_model_names[num_indices] = model_name
indices_to_run_num[num_indices] = (automl_seed, num_run)
try:
all_predictions_train, all_predictions_valid, all_predictions_test = (
self.get_all_predictions(
dir_ensemble,
dir_ensemble_model_files,
dir_valid,
dir_valid_list,
dir_test,
dir_test_list,
include_num_runs,
model_and_automl_re,
self.precision,
)
)
except IOError:
self.logger.error("Could not load the predictions.")
continue
if len(include_num_runs) == 0:
self.logger.error("All models do just random guessing")
time.sleep(2)
continue
else:
ensemble = EnsembleSelection(
ensemble_size=self.ensemble_size,
task_type=self.task_type,
metric=self.metric,
)
try:
ensemble.fit(all_predictions_train, targets_ensemble, include_num_runs)
self.logger.info(ensemble)
except ValueError as e:
self.logger.error("Caught ValueError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except IndexError as e:
self.logger.error("Caught IndexError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except Exception as e:
self.logger.error("Caught error! %s", str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
# Output the score
self.logger.info("Training performance: %f" % ensemble.train_score_)
self.logger.info(
"Building the ensemble took %f seconds"
% watch.wall_elapsed("ensemble_iter_" + str(num_iteration))
)
# Set this variable here to avoid re-running the ensemble builder
# every two seconds in case the ensemble did not change
current_num_models = len(dir_ensemble_model_files)
ensemble_predictions = ensemble.predict(all_predictions_train)
if sys.version_info[0] == 2:
ensemble_predictions.flags.writeable = False
current_hash = hash(ensemble_predictions.data)
else:
current_hash = hash(ensemble_predictions.data.tobytes())
# Only output a new ensemble and new predictions if the output of the
# ensemble would actually change!
# TODO this is neither safe (collisions, tests only with the ensemble
# prediction, but not the ensemble), implement a hash function for
# each possible ensemble builder.
if last_hash is not None:
if current_hash == last_hash:
self.logger.info("Ensemble output did not change.")
time.sleep(2)
continue
else:
last_hash = current_hash
else:
last_hash = current_hash
# Save the ensemble for later use in the main auto-sklearn module!
self.backend.save_ensemble(ensemble, index_run, self.seed)
# Save predictions for valid and test data set
if len(dir_valid_list) == len(dir_ensemble_model_files):
all_predictions_valid = np.array(all_predictions_valid)
ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_valid.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_valid.shape[1] == 1
):
bin_array[ensemble_predictions_valid >= 0.5] = 1
else:
sample_num = ensemble_predictions_valid.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_valid[i, :])
bin_array[i, j] = 1
ensemble_predictions_valid = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_valid.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_valid.size > 1000000:
precision = 4
else:
# File size maximally 2.1MB
precision = 6
self.backend.save_predictions_as_txt(
ensemble_predictions_valid,
"valid",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many validation set predictions (%d)"
"as ensemble predictions (%d)!.",
len(dir_valid_list),
len(dir_ensemble_model_files),
)
del all_predictions_valid
if len(dir_test_list) == len(dir_ensemble_model_files):
all_predictions_test = np.array(all_predictions_test)
ensemble_predictions_test = ensemble.predict(all_predictions_test)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_test = ensemble_predictions_test[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_test[ensemble_predictions_test < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_test.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_test.shape[1] == 1
):
bin_array[ensemble_predictions_test >= 0.5] = 1
else:
sample_num = ensemble_predictions_test.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_test[i, :])
bin_array[i, j] = 1
ensemble_predictions_test = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_test.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_test.size > 1000000:
precision = 4
else:
precision = 6
self.backend.save_predictions_as_txt(
ensemble_predictions_test,
"test",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many test set predictions (%d) as "
"ensemble predictions (%d)!",
len(dir_test_list),
len(dir_ensemble_model_files),
)
del all_predictions_test
current_num_models = len(dir_ensemble_model_files)
watch.stop_task("index_run" + str(index_run))
time_iter = watch.get_wall_dur("index_run" + str(index_run))
used_time = watch.wall_elapsed("ensemble_builder")
index_run += 1
return
|
def main(self):
# Main loop of the ensemble builder: repeatedly scan the prediction
# directories written by the evaluation processes, score every model's
# ensemble-set predictions, fit an EnsembleSelection on the selected
# subset and persist the ensemble plus its valid/test predictions via
# self.backend. Runs until self.limit seconds are used up or
# self.max_iterations is exceeded.
watch = StopWatch()
watch.start_task("ensemble_builder")
used_time = 0
time_iter = 0
index_run = 0
num_iteration = 0
current_num_models = 0
last_hash = None
current_hash = None
dir_ensemble = os.path.join(
self.backend.temporary_directory, ".auto-sklearn", "predictions_ensemble"
)
dir_valid = os.path.join(
self.backend.temporary_directory, ".auto-sklearn", "predictions_valid"
)
dir_test = os.path.join(
self.backend.temporary_directory, ".auto-sklearn", "predictions_test"
)
paths_ = [dir_ensemble, dir_valid, dir_test]
dir_ensemble_list_mtimes = []
self.logger.debug(
"Starting main loop with %f seconds and %d iterations "
"left." % (self.limit - used_time, num_iteration)
)
while used_time < self.limit or (
self.max_iterations > 0 and self.max_iterations >= num_iteration
):
num_iteration += 1
self.logger.debug("Time left: %f", self.limit - used_time)
self.logger.debug("Time last ensemble building: %f", time_iter)
# Reload the ensemble targets every iteration, important, because cv may
# update the ensemble targets in the cause of running auto-sklearn
# TODO update cv in order to not need this any more!
targets_ensemble = self.backend.load_targets_ensemble()
# Load the predictions from the models
exists = [os.path.isdir(dir_) for dir_ in paths_]
if not exists[0]: # all(exists):
self.logger.debug("Prediction directory %s does not exist!" % dir_ensemble)
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
# In non-shared mode only this seed's own prediction files take part;
# in shared mode every file in the directory does.
if self.shared_mode is False:
dir_ensemble_list = sorted(
glob.glob(
os.path.join(
dir_ensemble, "predictions_ensemble_%s_*.npy" % self.seed
)
)
)
if exists[1]:
dir_valid_list = sorted(
glob.glob(
os.path.join(
dir_valid, "predictions_valid_%s_*.npy" % self.seed
)
)
)
else:
dir_valid_list = []
if exists[2]:
dir_test_list = sorted(
glob.glob(
os.path.join(dir_test, "predictions_test_%s_*.npy" % self.seed)
)
)
else:
dir_test_list = []
else:
dir_ensemble_list = sorted(os.listdir(dir_ensemble))
dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []
# Check the modification times because predictions can be updated
# over time!
old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
dir_ensemble_list_mtimes = []
for dir_ensemble_file in dir_ensemble_list:
if dir_ensemble_file.endswith("/"):
dir_ensemble_file = dir_ensemble_file[:-1]
if not dir_ensemble_file.endswith(".npy"):
self.logger.warning(
"Error loading file (not .npy): %s", dir_ensemble_file
)
continue
basename = os.path.basename(dir_ensemble_file)
dir_ensemble_file = os.path.join(dir_ensemble, basename)
mtime = os.path.getmtime(dir_ensemble_file)
dir_ensemble_list_mtimes.append(mtime)
if len(dir_ensemble_list) == 0:
self.logger.debug("Directories are empty")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
# Skip the iteration when neither the file set nor any mtime changed.
if (
len(dir_ensemble_list) <= current_num_models
and old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes
):
self.logger.debug("Nothing has changed since the last time")
time.sleep(2)
used_time = watch.wall_elapsed("ensemble_builder")
continue
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# TODO restructure time management in the ensemble builder,
# what is the time of index_run actually needed for?
watch.start_task("index_run" + str(index_run))
watch.start_task("ensemble_iter_" + str(num_iteration))
# List of num_runs (which are in the filename) which will be included
# later
include_num_runs = []
backup_num_runs = []
model_and_automl_re = re.compile(r"_([0-9]*)_([0-9]*)\.npy$")
if self.ensemble_nbest is not None:
# Keeps track of the single scores of each model in our ensemble
scores_nbest = []
# The indices of the model that are currently in our ensemble
indices_nbest = []
# The names of the models
model_names = []
model_names_to_scores = dict()
model_idx = 0
# NOTE(review): the three precision comparisons below use `is` against
# string literals; this only works because CPython interns these short
# strings and should be `==` (newer Pythons emit a SyntaxWarning here).
for model_name in dir_ensemble_list:
if model_name.endswith("/"):
model_name = model_name[:-1]
basename = os.path.basename(model_name)
try:
if self.precision is "16":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float16
)
elif self.precision is "32":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float32
)
elif self.precision is "64":
predictions = np.load(os.path.join(dir_ensemble, basename)).astype(
dtype=np.float64
)
else:
predictions = np.load(os.path.join(dir_ensemble, basename))
score = calculate_score(
targets_ensemble,
predictions,
self.task_type,
self.metric,
predictions.shape[1],
)
except Exception as e:
# A model that cannot be loaded or scored gets score -1 so it is
# excluded by the <= 0.001 checks below.
self.logger.warning("Error loading %s: %s", basename, e)
score = -1
model_names_to_scores[model_name] = score
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if self.ensemble_nbest is not None:
if score <= 0.001:
self.logger.info(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
# If we have less models in our ensemble than ensemble_nbest add
# the current model if it is better than random
elif len(scores_nbest) < self.ensemble_nbest:
scores_nbest.append(score)
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
model_names.append(model_name)
else:
# Take the worst performing model in our ensemble so far
idx = np.argmin(np.array([scores_nbest]))
# If the current model is better than the worst model in
# our ensemble replace it by the current model
if scores_nbest[idx] < score:
self.logger.info(
"Worst model in our ensemble: %s with score %f "
"will be replaced by model %s with score %f",
model_names[idx],
scores_nbest[idx],
model_name,
score,
)
# Exclude the old model
del scores_nbest[idx]
scores_nbest.append(score)
del include_num_runs[idx]
del indices_nbest[idx]
indices_nbest.append(model_idx)
include_num_runs.append((automl_seed, num_run))
del model_names[idx]
model_names.append(model_name)
# Otherwise exclude the current model from the ensemble
else:
# include_num_runs.append(True)
pass
else:
# Load all predictions that are better than random
if score <= 0.001:
# include_num_runs.append(True)
self.logger.info(
"Model only predicts at random: "
+ model_name
+ " has score: "
+ str(score)
)
backup_num_runs.append((automl_seed, num_run))
else:
include_num_runs.append((automl_seed, num_run))
model_idx += 1
# If there is no model better than random guessing, we have to use
# all models which do random guessing
if len(include_num_runs) == 0:
include_num_runs = backup_num_runs
indices_to_model_names = dict()
indices_to_run_num = dict()
for i, model_name in enumerate(dir_ensemble_list):
match = model_and_automl_re.search(model_name)
automl_seed = int(match.group(1))
num_run = int(match.group(2))
if (automl_seed, num_run) in include_num_runs:
num_indices = len(indices_to_model_names)
indices_to_model_names[num_indices] = model_name
indices_to_run_num[num_indices] = (automl_seed, num_run)
try:
all_predictions_train, all_predictions_valid, all_predictions_test = (
self.get_all_predictions(
dir_ensemble,
dir_ensemble_list,
dir_valid,
dir_valid_list,
dir_test,
dir_test_list,
include_num_runs,
model_and_automl_re,
self.precision,
)
)
except IOError:
self.logger.error("Could not load the predictions.")
continue
if len(include_num_runs) == 0:
self.logger.error("All models do just random guessing")
time.sleep(2)
continue
else:
ensemble = EnsembleSelection(
ensemble_size=self.ensemble_size,
task_type=self.task_type,
metric=self.metric,
)
try:
ensemble.fit(all_predictions_train, targets_ensemble, include_num_runs)
self.logger.info(ensemble)
except ValueError as e:
self.logger.error("Caught ValueError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except IndexError as e:
self.logger.error("Caught IndexError: " + str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
except Exception as e:
self.logger.error("Caught error! %s", str(e))
used_time = watch.wall_elapsed("ensemble_builder")
time.sleep(2)
continue
# Output the score
self.logger.info("Training performance: %f" % ensemble.train_score_)
self.logger.info(
"Building the ensemble took %f seconds"
% watch.wall_elapsed("ensemble_iter_" + str(num_iteration))
)
# Set this variable here to avoid re-running the ensemble builder
# every two seconds in case the ensemble did not change
current_num_models = len(dir_ensemble_list)
ensemble_predictions = ensemble.predict(all_predictions_train)
# Python 2 requires a read-only buffer before hashing; Python 3
# hashes the raw bytes directly.
if sys.version_info[0] == 2:
ensemble_predictions.flags.writeable = False
current_hash = hash(ensemble_predictions.data)
else:
current_hash = hash(ensemble_predictions.data.tobytes())
# Only output a new ensemble and new predictions if the output of the
# ensemble would actually change!
# TODO this is neither safe (collisions, tests only with the ensemble
# prediction, but not the ensemble), implement a hash function for
# each possible ensemble builder.
if last_hash is not None:
if current_hash == last_hash:
self.logger.info("Ensemble output did not change.")
time.sleep(2)
continue
else:
last_hash = current_hash
else:
last_hash = current_hash
# Save the ensemble for later use in the main auto-sklearn module!
self.backend.save_ensemble(ensemble, index_run, self.seed)
# Save predictions for valid and test data set
if len(dir_valid_list) == len(dir_ensemble_list):
all_predictions_valid = np.array(all_predictions_valid)
ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_valid.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_valid.shape[1] == 1
):
bin_array[ensemble_predictions_valid >= 0.5] = 1
else:
sample_num = ensemble_predictions_valid.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_valid[i, :])
bin_array[i, j] = 1
ensemble_predictions_valid = bin_array
# Pick the number of decimals written to disk based on output size.
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_valid.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_valid.size > 1000000:
precision = 4
else:
# File size maximally 2.1MB
precision = 6
self.backend.save_predictions_as_txt(
ensemble_predictions_valid,
"valid",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many validation set predictions (%d)"
"as ensemble predictions (%d)!.",
len(dir_valid_list),
len(dir_ensemble_list),
)
del all_predictions_valid
if len(dir_test_list) == len(dir_ensemble_list):
all_predictions_test = np.array(all_predictions_test)
ensemble_predictions_test = ensemble.predict(all_predictions_test)
if self.task_type == BINARY_CLASSIFICATION:
ensemble_predictions_test = ensemble_predictions_test[:, 1]
if self.low_precision:
if self.task_type in [
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
MULTILABEL_CLASSIFICATION,
]:
ensemble_predictions_test[ensemble_predictions_test < 1e-4] = 0.0
if self.metric in [BAC_METRIC, F1_METRIC]:
bin_array = np.zeros(
ensemble_predictions_test.shape, dtype=np.int32
)
if (self.task_type != MULTICLASS_CLASSIFICATION) or (
ensemble_predictions_test.shape[1] == 1
):
bin_array[ensemble_predictions_test >= 0.5] = 1
else:
sample_num = ensemble_predictions_test.shape[0]
for i in range(sample_num):
j = np.argmax(ensemble_predictions_test[i, :])
bin_array[i, j] = 1
ensemble_predictions_test = bin_array
if self.task_type in CLASSIFICATION_TASKS:
if ensemble_predictions_test.size < (20000 * 20):
precision = 3
else:
precision = 2
else:
if ensemble_predictions_test.size > 1000000:
precision = 4
else:
precision = 6
self.backend.save_predictions_as_txt(
ensemble_predictions_test,
"test",
index_run,
prefix=self.dataset_name,
precision=precision,
)
else:
self.logger.info(
"Could not find as many test set predictions (%d) as "
"ensemble predictions (%d)!",
len(dir_test_list),
len(dir_ensemble_list),
)
del all_predictions_test
current_num_models = len(dir_ensemble_list)
watch.stop_task("index_run" + str(index_run))
time_iter = watch.get_wall_dur("index_run" + str(index_run))
used_time = watch.wall_elapsed("ensemble_builder")
index_run += 1
return
|
https://github.com/automl/auto-sklearn/issues/69
|
Traceback (most recent call last):
File "truffles-autosklearn-multy.py", line 82, in spawn_classifier
c.fit(X_train, y_train, metric='f1_metric')
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/estimators.py", line 271, in fit
feat_type, dataset_name)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/automl.py", line 262, in fit
return self._fit(loaded_data_manager)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/automl.py", line 526, in _fit
self._load_models()
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/automl.py", line 647, in _load_models
self.models_ = self._backend.load_all_models(seed)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/util/backend.py", line 171, in load_all_models
models = self.load_models_by_file_names(model_files)
File "/home/ekobylkin/anaconda2/lib/python2.7/site-packages/AutoSklearn-0.0.1.dev0-py2.7-linux-x86_64.egg/autosklearn/util/backend.py", line 185, in load_models_by_file_names
seed = int(basename_parts[0])
ValueError: invalid literal for int() with base 10: 'tmpJmPf2D'
|
ValueError
|
def fit(
    self,
    X,
    y,
    task=MULTICLASS_CLASSIFICATION,
    metric="acc_metric",
    feat_type=None,
    dataset_name=None,
):
    """Prepare the backend, validate the inputs and hand off to ``_fit``.

    Builds an ``XYDataManager`` from (X, y) and returns whatever
    ``self._fit`` returns. Raises ``ValueError`` when ``feat_type`` is
    inconsistent with ``X`` or contains invalid entries.
    """
    if self._shared_mode:
        # A sibling process may already have stored a data manager;
        # reuse its dataset name. On the very first call nothing exists
        # yet, so the IOError is expected and ignored.
        try:
            dataset_name = self._backend.load_datamanager().name
        except IOError:
            pass
    else:
        self._backend.context.delete_directories()
    self._backend.context.create_directories()

    if dataset_name is None:
        # Fall back to a content-derived name for the dataset.
        dataset_name = hash_numpy_array(X)

    self._backend.save_start_time(self._seed)
    self._stopwatch = StopWatch()
    self._dataset_name = dataset_name
    self._stopwatch.start_task(self._dataset_name)
    self._logger = self._get_logger(dataset_name)

    if isinstance(metric, str):
        metric = STRING_TO_METRIC[metric]

    if feat_type is not None:
        # Validate length, element type and allowed values, in that order.
        if len(feat_type) != X.shape[1]:
            raise ValueError(
                "Array feat_type does not have same number of "
                "variables as X has features. %d vs %d."
                % (len(feat_type), X.shape[1])
            )
        if not all(isinstance(f, str) for f in feat_type):
            raise ValueError("Array feat_type must only contain strings.")
        for ft in feat_type:
            if ft.lower() not in ["categorical", "numerical"]:
                raise ValueError(
                    "Only `Categorical` and `Numerical` are "
                    "valid feature types, you passed `%s`" % ft
                )

    self._data_memory_limit = None
    data_manager = XYDataManager(
        X,
        y,
        task=task,
        metric=metric,
        feat_type=feat_type,
        dataset_name=dataset_name,
        encode_labels=False,
    )
    return self._fit(data_manager)
|
def fit(
    self,
    X,
    y,
    task=MULTICLASS_CLASSIFICATION,
    metric="acc_metric",
    feat_type=None,
    dataset_name=None,
):
    """Prepare the backend, validate the inputs and hand off to ``_fit``.

    Builds an ``XYDataManager`` from (X, y) and returns whatever
    ``self._fit`` returns. Raises ``ValueError`` when ``feat_type`` is
    inconsistent with ``X`` or contains invalid entries.
    """
    if not self._shared_mode:
        self._backend.context.delete_directories()
    else:
        # If this fails, it's likely that this is the first call to get
        # the data manager
        try:
            D = self._backend.load_datamanager()
            dataset_name = D.name
        except IOError:
            pass
    self._backend.context.create_directories()
    if dataset_name is None:
        # Derive a default dataset name from the data's bytes.
        m = hashlib.md5()
        try:
            m.update(X.data)
        except (AttributeError, BufferError, ValueError):
            # BUG FIX: hashlib can only consume a single C-contiguous
            # buffer, so ``X.data`` raises for sliced / Fortran-ordered
            # arrays (BufferError on Py3, AttributeError on Py2). Fall
            # back to a C-order byte copy; for already-contiguous arrays
            # the fast path above yields the identical digest as before.
            m.update(X.tobytes())
        dataset_name = m.hexdigest()
    self._backend.save_start_time(self._seed)
    self._stopwatch = StopWatch()
    self._dataset_name = dataset_name
    self._stopwatch.start_task(self._dataset_name)
    self._logger = self._get_logger(dataset_name)
    if isinstance(metric, str):
        metric = STRING_TO_METRIC[metric]
    if feat_type is not None and len(feat_type) != X.shape[1]:
        raise ValueError(
            "Array feat_type does not have same number of "
            "variables as X has features. %d vs %d." % (len(feat_type), X.shape[1])
        )
    if feat_type is not None and not all([isinstance(f, str) for f in feat_type]):
        raise ValueError("Array feat_type must only contain strings.")
    if feat_type is not None:
        for ft in feat_type:
            if ft.lower() not in ["categorical", "numerical"]:
                raise ValueError(
                    "Only `Categorical` and `Numerical` are "
                    "valid feature types, you passed `%s`" % ft
                )
    self._data_memory_limit = None
    loaded_data_manager = XYDataManager(
        X,
        y,
        task=task,
        metric=metric,
        feat_type=feat_type,
        dataset_name=dataset_name,
        encode_labels=False,
    )
    return self._fit(loaded_data_manager)
|
https://github.com/automl/auto-sklearn/issues/115
|
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#Boiler plate\n",
"import time\n",
"from tqdm import *\n",
"from tpot import TPOT\n",
"import autosklearn.classification\n",
"import sklearn.datasets\n",
"import numpy as np\n",
"import pandas as pd\n",
"from sklearn.cross_validation import train_test_split\n",
"\n",
"#Set the random seed for reproducibility across computers\n",
"vRndSeed = np.random.RandomState(seed=786196074)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>0</th>\n",
" <th>1</th>\n",
" <th>2</th>\n",
" <th>3</th>\n",
" <th>4</th>\n",
" <th>5</th>\n",
" <th>6</th>\n",
" <th>7</th>\n",
" <th>8</th>\n",
" <th>9</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>24</td>\n",
" <td>2</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>2</td>\n",
" <td>3</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>45</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>10</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>4</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>43</td>\n",
" <td>2</td>\n",
" <td>3</td>\n",
" <td>7</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>4</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>42</td>\n",
" <td>3</td>\n",
" <td>2</td>\n",
" <td>9</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>36</td>\n",
" <td>3</td>\n",
" <td>3</td>\n",
" <td>8</td>\n",
" <td>1</td>\n",
" <td>1</td>\n",
" <td>3</td>\n",
" <td>2</td>\n",
" <td>0</td>\n",
" <td>1</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" 0 1 2 3 4 5 6 7 8 9\n",
"0 24 2 3 3 1 1 2 3 0 1\n",
"1 45 1 3 10 1 1 3 4 0 1\n",
"2 43 2 3 7 1 1 3 4 0 1\n",
"3 42 3 2 9 1 1 3 3 0 1\n",
"4 36 3 3 8 1 1 3 2 0 1"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"MLMe = pd.read_table(\"data/dtcmc.data.txt\", \",\", header=None)\n",
"MLMe.head()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"#Factorize encodings if necessary"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"MLMe.rename(columns={9 : 'class'}, inplace=True)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0 int64\n",
"1 int64\n",
"2 int64\n",
"3 int64\n",
"4 int64\n",
"5 int64\n",
"6 int64\n",
"7 int64\n",
"8 int64\n",
"class int64\n",
"dtype: object"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"MLMe.dtypes"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0 False\n",
"1 False\n",
"2 False\n",
"3 False\n",
"4 False\n",
"5 False\n",
"6 False\n",
"7 False\n",
"8 False\n",
"class False\n",
"dtype: bool"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pd.isnull(MLMe).any()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"#Validate - need to fill out this section more\n",
"MLMe_class = MLMe['class'].values\n",
"assert (len(MLMe_class) == len(MLMe.index)), \"Not matching\""
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"((1473, 10), (1473,))"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"MLMe.shape, MLMe_class.shape"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"(1104, 369)"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"training_indices, validation_indices = training_indices, testing_indices = train_test_split(MLMe.index, stratify = MLMe_class, train_size=0.75, test_size=0.25)\n",
"\n",
"training_indices.size, validation_indices.size"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"X_train = np.ascontiguousarray(MLMe.drop('class',axis=1).loc[training_indices].values)\n",
"y_train = np.ascontiguousarray(MLMe.loc[training_indices,'class'].values)\n",
"\n",
"X_test = np.ascontiguousarray(MLMe.drop('class',axis=1).loc[validation_indices].values)\n",
"y_test = np.ascontiguousarray(MLMe.loc[validation_indices, 'class'].values)\n",
"\n",
"#X_train, y_train, X_test, y_test"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"((1104, 9), (1104,), (369, 9), (369,))"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X_train.shape, y_train.shape, X_test.shape, y_test.shape"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"(dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'))"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X_train.dtype, y_train.dtype, X_test.dtype, y_test.dtype"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"( C_CONTIGUOUS : True\n",
" F_CONTIGUOUS : False\n",
" OWNDATA : True\n",
" WRITEABLE : True\n",
" ALIGNED : True\n",
" UPDATEIFCOPY : False, C_CONTIGUOUS : True\n",
" F_CONTIGUOUS : True\n",
" OWNDATA : True\n",
" WRITEABLE : True\n",
" ALIGNED : True\n",
" UPDATEIFCOPY : False, C_CONTIGUOUS : True\n",
" F_CONTIGUOUS : False\n",
" OWNDATA : True\n",
" WRITEABLE : True\n",
" ALIGNED : True\n",
" UPDATEIFCOPY : False, C_CONTIGUOUS : True\n",
" F_CONTIGUOUS : True\n",
" OWNDATA : True\n",
" WRITEABLE : True\n",
" ALIGNED : True\n",
" UPDATEIFCOPY : False)"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"X_train.flags, y_train.flags, X_test.flags, y_test.flags"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 2%|▏ | 101/5200 [00:36<31:29, 2.70pipeline/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 1 - Current best internal CV score: 0.62016\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 4%|▎ | 189/5200 [00:00<1:43:21, 1.24s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 2 - Current best internal CV score: 0.62450\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 6%|▌ | 300/5200 [02:47<38:20, 2.13pipeline/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 3 - Current best internal CV score: 0.63011\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 7%|▋ | 389/5200 [00:00<1:57:04, 1.46s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 4 - Current best internal CV score: 0.63136\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 9%|▉ | 489/5200 [00:00<2:40:16, 2.04s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 5 - Current best internal CV score: 0.63415\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 11%|█▏ | 587/5200 [00:00<2:13:14, 1.73s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 6 - Current best internal CV score: 0.64038\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 13%|█▎ | 693/5200 [00:00<2:57:17, 2.36s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 7 - Current best internal CV score: 0.64038\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 15%|█▌ | 789/5200 [00:00<2:12:40, 1.80s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 8 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 17%|█▋ | 896/5200 [00:00<1:53:17, 1.58s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 9 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 19%|█▉ | 986/5200 [00:00<1:24:58, 1.21s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 10 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 21%|██ | 1087/5200 [00:00<1:47:46, 1.57s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 11 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 23%|██▎ | 1188/5200 [00:00<1:25:09, 1.27s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 12 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 25%|██▍ | 1292/5200 [00:00<1:26:51, 1.33s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 13 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 27%|██▋ | 1394/5200 [00:00<1:17:11, 1.22s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 14 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 29%|██▊ | 1484/5200 [00:00<56:53, 1.09pipeline/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 15 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 31%|███ | 1590/5200 [00:00<1:24:47, 1.41s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 16 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 33%|███▎ | 1692/5200 [00:00<1:16:58, 1.32s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 17 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 35%|███▍ | 1795/5200 [00:00<1:10:13, 1.24s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 18 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 36%|███▋ | 1888/5200 [00:00<1:18:50, 1.43s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 19 - Current best internal CV score: 0.64317\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 38%|███▊ | 1990/5200 [00:00<1:12:59, 1.36s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 20 - Current best internal CV score: 0.64392\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 40%|████ | 2082/5200 [00:00<1:07:48, 1.30s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 21 - Current best internal CV score: 0.64392\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 42%|████▏ | 2193/5200 [00:00<1:13:34, 1.47s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 22 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 44%|████▍ | 2286/5200 [00:00<1:15:38, 1.56s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 23 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 46%|████▌ | 2393/5200 [00:00<1:06:45, 1.43s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 24 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 48%|████▊ | 2493/5200 [00:00<1:18:55, 1.75s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 25 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 50%|████▉ | 2586/5200 [00:00<52:42, 1.21s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 26 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 52%|█████▏ | 2688/5200 [00:00<1:40:47, 2.41s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 27 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 54%|█████▎ | 2789/5200 [00:00<53:18, 1.33s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 28 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 56%|█████▌ | 2889/5200 [00:00<1:06:12, 1.72s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 29 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 58%|█████▊ | 2991/5200 [00:00<1:25:08, 2.31s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 30 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 59%|█████▉ | 3091/5200 [00:00<44:32, 1.27s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 31 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 61%|██████▏ | 3188/5200 [00:00<38:43, 1.15s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 32 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 63%|██████▎ | 3293/5200 [00:00<33:04, 1.04s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 33 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 65%|██████▌ | 3393/5200 [00:00<34:03, 1.13s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 34 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 67%|██████▋ | 3487/5200 [00:00<51:12, 1.79s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 35 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 69%|██████▉ | 3600/5200 [1:14:42<1:10:39, 2.65s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 36 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 71%|███████ | 3690/5200 [00:00<31:57, 1.27s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 37 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 73%|███████▎ | 3788/5200 [00:00<33:18, 1.42s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 38 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 75%|███████▍ | 3890/5200 [00:00<34:09, 1.56s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 39 - Current best internal CV score: 0.64626\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 77%|███████▋ | 3995/5200 [00:00<38:16, 1.91s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 40 - Current best internal CV score: 0.64877\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 79%|███████▊ | 4086/5200 [00:00<36:41, 1.98s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 41 - Current best internal CV score: 0.64877\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 81%|████████ | 4194/5200 [00:00<24:16, 1.45s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 42 - Current best internal CV score: 0.64877\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 83%|████████▎ | 4294/5200 [00:00<20:33, 1.36s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 43 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 84%|████████▍ | 4392/5200 [00:00<23:18, 1.73s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 44 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 86%|████████▋ | 4486/5200 [00:00<16:45, 1.41s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 45 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 88%|████████▊ | 4590/5200 [00:00<14:30, 1.43s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 46 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 90%|█████████ | 4691/5200 [00:00<09:37, 1.14s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 47 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 92%|█████████▏| 4788/5200 [00:00<14:13, 2.07s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 48 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 94%|█████████▍| 4887/5200 [00:00<10:04, 1.93s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 49 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 96%|█████████▌| 4989/5200 [00:00<06:48, 1.94s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 50 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"GP Progress: 98%|█████████▊| 5093/5200 [00:00<02:11, 1.23s/pipeline]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generation 51 - Current best internal CV score: 0.64987\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": []
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Best pipeline: _gradient_boosting(input_df, 0.51000000000000001, 9.0, 0.28000000000000003)\n"
]
}
],
"source": [
"tpot = TPOT(generations=51, verbosity=2)\n",
"tpot.fit(X_train, y_train)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"0.62077627636352106"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tpot.score(X_test, y_test)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"tpot.export('tpot_contraceptive_pipeline.py')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:51:06,381:AutoML(1):7a3a10b65f5366b76f046add6da72a1c] Starting to create dummy predictions.\n",
"[INFO] [2016-08-16 07:51:06,408:AutoML(1):7a3a10b65f5366b76f046add6da72a1c] Finished creating dummy prediction 1/2.\n",
"[INFO] [2016-08-16 07:51:06,434:AutoML(1):7a3a10b65f5366b76f046add6da72a1c] Finished creating dummy prediction 2/2.\n",
"[INFO] [2016-08-16 07:51:07,022:AutoML(1):7a3a10b65f5366b76f046add6da72a1c] Start Ensemble with 599.35sec time left\n",
"[INFO] [2016-08-16 07:51:07,035:AutoML(1):7a3a10b65f5366b76f046add6da72a1c] Start SMAC with 599.34sec time left\n",
"[INFO] [2016-08-16 07:51:07,070:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Training default configurations on a subset of 364/1104 data points.\n",
"[INFO] [2016-08-16 07:51:07,079:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 3 on SUBSET with size 364 and time limit 180s.\n",
"[INFO] [2016-08-16 07:51:07,081:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.0\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 1\n",
" classifier:random_forest:min_samples_split, Value: 2\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.01\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:51:07,137:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.139344 1: 0.139344 2: 0.139344 3: 0.139344 4: 0.139344 5: 0.139344 6: 0.139344 7: 0.139344 8: 0.139344 9: 0.139344 10: 0.139344 11: 0.139344 12: 0.139344 13: 0.139344 14: 0.139344 15: 0.139344 16: 0.139344 17: 0.139344 18: 0.139344 19: 0.139344 20: 0.139344 21: 0.139344 22: 0.139344 23: 0.139344 24: 0.139344 25: 0.139344 26: 0.139344 27: 0.139344 28: 0.139344 29: 0.139344 30: 0.139344 31: 0.139344 32: 0.139344 33: 0.139344 34: 0.139344 35: 0.139344 36: 0.139344 37: 0.139344 38: 0.139344 39: 0.139344 40: 0.139344 41: 0.139344 42: 0.139344 43: 0.139344 44: 0.139344 45: 0.139344 46: 0.139344 47: 0.139344 48: 0.139344 49: 0.139344\n",
"\tMembers: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 1. 0.]\n",
"\tIdentifiers: (1, 1)\n",
"[INFO] [2016-08-16 07:51:07,145:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.139344\n",
"[INFO] [2016-08-16 07:51:07,147:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.083450 seconds\n",
"[INFO] [2016-08-16 07:51:07,152:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (2)!.\n",
"[INFO] [2016-08-16 07:51:07,154:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (2)!\n",
"[INFO] [2016-08-16 07:51:07,307:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 3. configuration on SUBSET. Duration 0.194043; loss 0.729508; status 1; additional run info: ;duration: 0.19404339790344238;num_run:00003 \n",
"[INFO] [2016-08-16 07:51:07,309:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished SUBSET training sucessfully with ratio 0.330000\n",
"[INFO] [2016-08-16 07:51:07,312:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 4 on SUBSET with size 364 and time limit 180s.\n",
"[INFO] [2016-08-16 07:51:07,313:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.0001\n",
" classifier:sgd:average, Value: True\n",
" classifier:sgd:eta0, Value: 0.01\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: optimal\n",
" classifier:sgd:loss, Value: hinge\n",
" classifier:sgd:n_iter, Value: 5\n",
" classifier:sgd:penalty, Value: l2\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.1\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:51:07,356:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 4. configuration on SUBSET. Duration 0.014617; loss 0.836066; status 1; additional run info: ;duration: 0.014616966247558594;num_run:00004 \n",
"[INFO] [2016-08-16 07:51:07,358:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished SUBSET training sucessfully with ratio 0.330000\n",
"[INFO] [2016-08-16 07:51:07,360:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 5 on SUBSET with size 364 and time limit 180s.\n",
"[INFO] [2016-08-16 07:51:07,361:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 1.0\n",
" classifier:extra_trees:min_samples_leaf, Value: 5\n",
" classifier:extra_trees:min_samples_split, Value: 5\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.1\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 2.0\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:51:07,516:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 5. configuration on SUBSET. Duration 0.127089; loss 1.098361; status 1; additional run info: ;duration: 0.1270890235900879;num_run:00005 \n",
"[INFO] [2016-08-16 07:51:07,518:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished SUBSET training sucessfully with ratio 0.330000\n",
"[INFO] [2016-08-16 07:51:07,521:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 6 on SUBSET with size 364 and time limit 180s.\n",
"[INFO] [2016-08-16 07:51:07,523:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.1\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:51:07,562:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 6. configuration on SUBSET. Duration 0.012073; loss 0.766393; status 1; additional run info: ;duration: 0.012073278427124023;num_run:00006 \n",
"[INFO] [2016-08-16 07:51:07,564:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished SUBSET training sucessfully with ratio 0.330000\n",
"[INFO] [2016-08-16 07:51:07,567:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Metadata directory: /opt/conda/lib/python3.5/site-packages/autosklearn/metalearning/files/acc_metric_multiclass.classification_dense\n",
"[INFO] [2016-08-16 07:51:07,935:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Start calculating metafeatures for 7a3a10b65f5366b76f046add6da72a1c\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/numpy/lib/nanfunctions.py:1136: RuntimeWarning: Degrees of freedom <= 0 for slice.\n",
" warnings.warn(\"Degrees of freedom <= 0 for slice.\", RuntimeWarning)\n",
"/opt/conda/lib/python3.5/site-packages/numpy/lib/nanfunctions.py:675: RuntimeWarning: Mean of empty slice\n",
" warnings.warn(\"Mean of empty slice\", RuntimeWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:51:07,951:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Calculating Metafeatures (categorical attributes) took 0.02\n",
"[INFO] [2016-08-16 07:51:07,986:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Calculating Metafeatures (encoded attributes) took 0.02sec\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/validation.py:386: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and willraise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.\n",
" DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:51:08,099:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Time left for 7a3a10b65f5366b76f046add6da72a1c after finding initial configurations: 597.27sec\n",
"[INFO] [2016-08-16 07:51:08,102:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Metafeatures for dataset 7a3a10b65f5366b76f046add6da72a1cuiaeo\n",
" PercentageOfFeaturesWithMissingValues: 0.0\n",
" RatioNominalToNumerical: 0.0\n",
" ClassProbabilityMin: 0.22644927536231885\n",
" SymbolsMin: 0\n",
" ClassProbabilitySTD: 0.0822864030378\n",
" NumberOfFeaturesWithMissingValues: 0.0\n",
" NumberOfMissingValues: 0.0\n",
" ClassEntropy: 1.53938532217\n",
" KurtosisSTD: 3.04541281443\n",
" ClassProbabilityMax: 0.4266304347826087\n",
" NumberOfCategoricalFeatures: 0\n",
" LogDatasetRatio: -4.8094706495\n",
" LogInverseDatasetRatio: 4.8094706495\n",
" DatasetRatio: 0.008152173913043478\n",
" NumberOfInstancesWithMissingValues: 0.0\n",
" KurtosisMax: 9.05175671504\n",
" InverseDatasetRatio: 122.66666666666667\n",
" SkewnessMax: 3.32441825212\n",
" SymbolsMax: 0\n",
" ClassProbabilityMean: 0.333333333333\n",
" NumberOfFeatures: 9.0\n",
" SkewnessMin: -1.94653713851\n",
" SkewnessMean: -0.132110704661\n",
" NumberOfNumericFeatures: 9\n",
" LogNumberOfFeatures: 2.19722457734\n",
" RatioNumericalToNominal: 0.0\n",
" NumberOfInstances: 1104.0\n",
" KurtosisMean: 1.01017630883\n",
" PercentageOfInstancesWithMissingValues: 0.0\n",
" LandmarkRandomNodeLearner: 0.417541827542\n",
" NumberOfClasses: 3.0\n",
" SymbolsSum: 0.0\n",
" SymbolsSTD: 0\n",
" LogNumberOfInstances: 7.00669522684\n",
" SkewnessSTD: 1.49178078579\n",
" SymbolsMean: 0\n",
" KurtosisMin: -1.33713825174\n",
" PercentageOfMissingValues: 0.0\n",
"\n",
"[INFO] [2016-08-16 07:51:08,107:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] ['1018_acc', '1128_acc', '871_acc', '1166_acc', '57_acc', '821_acc', '18_acc', '1067_acc', '300_acc', '966_acc', '720_acc', '293_acc', '395_acc', '1056_acc', '1021_acc', '743_acc', '734_acc', '273_acc', '23_acc', '1049_acc', '181_acc', '1142_acc', '816_acc', '843_acc', '26_acc', '903_acc', '179_acc', '807_acc', '977_acc', '1040_acc', '979_acc', '31_acc', '1112_acc', '185_acc', '1050_acc', '741_acc', '728_acc', '737_acc', '1053_acc', '1002_acc', '401_acc', '881_acc', '1068_acc', '930_acc', '1000_acc', '822_acc', '1139_acc', '182_acc', '393_acc', '357_acc', '554_acc', '28_acc', '819_acc', '722_acc', '1146_acc', '866_acc', '991_acc', '1130_acc', '959_acc', '735_acc', '1134_acc', '38_acc', '914_acc', '6_acc', '904_acc', '845_acc', '1161_acc', '799_acc', '718_acc', '727_acc', '913_acc', '396_acc', '723_acc', '797_acc', '772_acc', '3_acc', '1020_acc', '44_acc', '934_acc', '180_acc', '958_acc', '803_acc', '1120_acc', '354_acc', '14_acc', '16_acc', '60_acc', '1138_acc', '21_acc', '833_acc', '912_acc', '971_acc', '22_acc', '953_acc', '184_acc', '993_acc', '12_acc', '897_acc', '752_acc', '391_acc', '806_acc', '24_acc', '1116_acc', '980_acc', '392_acc', '976_acc', '978_acc', '1069_acc', '1114_acc', '679_acc', '30_acc', '46_acc', '1119_acc', '761_acc', '917_acc', '389_acc', '1041_acc', '846_acc', '390_acc', '995_acc', '751_acc', '847_acc', '398_acc', '351_acc', '813_acc', '849_acc', '837_acc', '399_acc', '962_acc', '36_acc', '1019_acc', '7a3a10b65f5366b76f046add6da72a1cuiaeo', '32_acc', '715_acc', '923_acc', '1036_acc', '1111_acc', '740_acc', '901_acc', '910_acc', '823_acc']\n",
"[ERROR] [2016-08-16 07:51:09,164:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:51:09,350:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.270492 1: 0.270492 2: 0.270492 3: 0.270492 4: 0.270492 5: 0.270492 6: 0.270492 7: 0.270492 8: 0.270492 9: 0.270492 10: 0.270492 11: 0.270492 12: 0.270492 13: 0.270492 14: 0.270492 15: 0.270492 16: 0.270492 17: 0.270492 18: 0.270492 19: 0.270492 20: 0.270492 21: 0.270492 22: 0.270492 23: 0.270492 24: 0.270492 25: 0.270492 26: 0.270492 27: 0.270492 28: 0.270492 29: 0.270492 30: 0.270492 31: 0.270492 32: 0.270492 33: 0.270492 34: 0.270492 35: 0.270492 36: 0.270492 37: 0.270492 38: 0.270492 39: 0.270492 40: 0.270492 41: 0.270492 42: 0.270492 43: 0.270492 44: 0.270492 45: 0.270492 46: 0.270492 47: 0.270492 48: 0.270492 49: 0.270492\n",
"\tMembers: [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.98 0. 0.02 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 3)\n",
"[INFO] [2016-08-16 07:51:09,354:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.270492\n",
"[INFO] [2016-08-16 07:51:09,356:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.195199 seconds\n",
"[INFO] [2016-08-16 07:51:09,358:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (6)!.\n",
"[INFO] [2016-08-16 07:51:09,360:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (6)!\n",
"[INFO] [2016-08-16 07:51:44,407:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 7. configuration (default configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:44,410:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.0\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 1\n",
" classifier:random_forest:min_samples_split, Value: 2\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.01\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:51:44,633:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 7. configuration. Duration 0.185781; loss 0.725410; status 1; additional run info: ;duration: 0.18578052520751953;num_run:00007 \n",
"[INFO] [2016-08-16 07:51:44,636:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 8. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:44,638:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.027813080646332755\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 3\n",
" classifier:xgradient_boosting:min_child_weight, Value: 11\n",
" classifier:xgradient_boosting:n_estimators, Value: 100\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.2055753890049334\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.009992290843490832\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:51:44,758:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 8. configuration. Duration 0.092086; loss 0.696721; status 1; additional run info: ;duration: 0.0920858383178711;num_run:00008 \n",
"[INFO] [2016-08-16 07:51:44,761:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 9. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:44,763:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.0005401543374603144\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.04377140004056303\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: optimal\n",
" classifier:sgd:loss, Value: log\n",
" classifier:sgd:n_iter, Value: 943\n",
" classifier:sgd:penalty, Value: l2\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0007887810786977907\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.17377268248209546\n",
" preprocessor:select_rates:mode, Value: fwe\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[ERROR] [2016-08-16 07:51:45,456:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:51:45,459:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 9. configuration. Duration 0.667637; loss 0.844262; status 1; additional run info: ;duration: 0.6676368713378906;num_run:00009 \n",
"[INFO] [2016-08-16 07:51:45,461:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 10. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:45,463:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.05602526032246091\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 8\n",
" classifier:gradient_boosting:max_features, Value: 4.526821406542622\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 2\n",
" classifier:gradient_boosting:min_samples_split, Value: 5\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.5698904089850234\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.014830428662911994\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.3041931851021964\n",
" preprocessor:select_rates:mode, Value: fpr\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:51:45,722:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.303279 1: 0.327869 2: 0.327869 3: 0.327869 4: 0.327869 5: 0.327869 6: 0.327869 7: 0.327869 8: 0.327869 9: 0.327869 10: 0.327869 11: 0.327869 12: 0.327869 13: 0.327869 14: 0.327869 15: 0.327869 16: 0.327869 17: 0.327869 18: 0.327869 19: 0.327869 20: 0.327869 21: 0.327869 22: 0.327869 23: 0.327869 24: 0.327869 25: 0.327869 26: 0.327869 27: 0.327869 28: 0.327869 29: 0.327869 30: 0.327869 31: 0.327869 32: 0.327869 33: 0.327869 34: 0.327869 35: 0.327869 36: 0.327869 37: 0.327869 38: 0.327869 39: 0.327869 40: 0.327869 41: 0.327869 42: 0.327869 43: 0.327869 44: 0.327869 45: 0.327869 46: 0.327869 47: 0.327869 48: 0.327869 49: 0.327869\n",
"\tMembers: [6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0.02 0.02]\n",
"\tIdentifiers: (1, 1) (1, 8) (1, 9)\n",
"[INFO] [2016-08-16 07:51:45,727:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.327869\n",
"[INFO] [2016-08-16 07:51:45,728:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.275878 seconds\n",
"[INFO] [2016-08-16 07:51:45,731:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (9)!.\n",
"[INFO] [2016-08-16 07:51:45,732:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (9)!\n",
"[INFO] [2016-08-16 07:51:46,850:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 10. configuration. Duration 1.343028; loss 0.713115; status 1; additional run info: ;duration: 1.3430280685424805;num_run:00010 \n",
"[INFO] [2016-08-16 07:51:46,853:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 11. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:46,855:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.7129561445167657\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 5\n",
" classifier:gradient_boosting:max_features, Value: 3.93371407131337\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 20\n",
" classifier:gradient_boosting:min_samples_split, Value: 4\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.5706975618082643\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.015787599946780754\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[ERROR] [2016-08-16 07:51:47,744:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:51:47,769:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 11. configuration. Duration 0.869216; loss 0.750000; status 1; additional run info: ;duration: 0.8692162036895752;num_run:00011 \n",
"[INFO] [2016-08-16 07:51:47,772:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 12. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:47,774:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.20066795319926028\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 7\n",
" classifier:gradient_boosting:max_features, Value: 3.9325869489731167\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 7\n",
" classifier:gradient_boosting:min_samples_split, Value: 11\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.9976532055747943\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00046199690730130277\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:51:48,105:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.303279 1: 0.327869 2: 0.327869 3: 0.327869 4: 0.327869 5: 0.327869 6: 0.327869 7: 0.327869 8: 0.327869 9: 0.327869 10: 0.327869 11: 0.327869 12: 0.327869 13: 0.327869 14: 0.327869 15: 0.327869 16: 0.327869 17: 0.327869 18: 0.327869 19: 0.327869 20: 0.327869 21: 0.327869 22: 0.327869 23: 0.327869 24: 0.327869 25: 0.327869 26: 0.327869 27: 0.327869 28: 0.327869 29: 0.327869 30: 0.327869 31: 0.327869 32: 0.327869 33: 0.327869 34: 0.327869 35: 0.327869 36: 0.327869 37: 0.327869 38: 0.327869 39: 0.327869 40: 0.327869 41: 0.327869 42: 0.327869 43: 0.327869 44: 0.327869 45: 0.327869 46: 0.327869 47: 0.327869 48: 0.327869 49: 0.327869\n",
"\tMembers: [6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0.02 0.02 0. ]\n",
"\tIdentifiers: (1, 1) (1, 8) (1, 9)\n",
"[INFO] [2016-08-16 07:51:48,111:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.327869\n",
"[INFO] [2016-08-16 07:51:48,113:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.372244 seconds\n",
"[INFO] [2016-08-16 07:51:48,115:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:51:48,854:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 12. configuration. Duration 1.036347; loss 0.750000; status 1; additional run info: ;duration: 1.0363473892211914;num_run:00012 \n",
"[INFO] [2016-08-16 07:51:48,857:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 13. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:48,859:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.07803133051910395\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 10\n",
" classifier:xgradient_boosting:min_child_weight, Value: 2\n",
" classifier:xgradient_boosting:n_estimators, Value: 100\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.41767000017936246\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.006665191731327244\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:51:49,132:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 13. configuration. Duration 0.239706; loss 0.680328; status 1; additional run info: ;duration: 0.23970603942871094;num_run:00013 \n",
"[INFO] [2016-08-16 07:51:49,135:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 14. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:49,136:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.320190941684566\n",
" classifier:adaboost:max_depth, Value: 3\n",
" classifier:adaboost:n_estimators, Value: 178\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:51:49,761:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 14. configuration. Duration 0.583321; loss 0.692623; status 1; additional run info: ;duration: 0.5833206176757812;num_run:00014 \n",
"[INFO] [2016-08-16 07:51:49,764:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 15. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:49,767:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 3.2877276772399777\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 3\n",
" classifier:random_forest:min_samples_split, Value: 6\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0059197487245511455\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run3\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:51:50,127:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:51:50,145:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 15. configuration. Duration 0.342097; loss 0.721311; status 1; additional run info: ;duration: 0.34209728240966797;num_run:00015 \n",
"[INFO] [2016-08-16 07:51:50,147:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 16. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:50,148:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 1.0081104516473922\n",
" classifier:adaboost:max_depth, Value: 6\n",
" classifier:adaboost:n_estimators, Value: 468\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 1.1828431725901418\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.0022792606924326923\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:51:50,571:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.319672 1: 0.344262 2: 0.344262 3: 0.344262 4: 0.344262 5: 0.344262 6: 0.344262 7: 0.344262 8: 0.344262 9: 0.344262 10: 0.344262 11: 0.344262 12: 0.344262 13: 0.344262 14: 0.344262 15: 0.344262 16: 0.344262 17: 0.344262 18: 0.344262 19: 0.344262 20: 0.344262 21: 0.344262 22: 0.344262 23: 0.344262 24: 0.344262 25: 0.344262 26: 0.344262 27: 0.344262 28: 0.344262 29: 0.344262 30: 0.344262 31: 0.344262 32: 0.344262 33: 0.344262 34: 0.344262 35: 0.344262 36: 0.344262 37: 0.344262 38: 0.344262 39: 0.344262 40: 0.344262 41: 0.344262 42: 0.344262 43: 0.344262 44: 0.344262 45: 0.344262 46: 0.344262 47: 0.344262 48: 0.344262 49: 0.344262\n",
"\tMembers: [11, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 13)\n",
"[INFO] [2016-08-16 07:51:50,576:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.344262\n",
"[INFO] [2016-08-16 07:51:50,578:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.453958 seconds\n",
"[INFO] [2016-08-16 07:51:50,580:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (14)!.\n",
"[INFO] [2016-08-16 07:51:50,582:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (14)!\n",
"[ERROR] [2016-08-16 07:51:50,589:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:51:51,128:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.319672 1: 0.344262 2: 0.344262 3: 0.344262 4: 0.344262 5: 0.344262 6: 0.344262 7: 0.344262 8: 0.344262 9: 0.344262 10: 0.344262 11: 0.344262 12: 0.344262 13: 0.344262 14: 0.344262 15: 0.344262 16: 0.344262 17: 0.344262 18: 0.344262 19: 0.344262 20: 0.344262 21: 0.344262 22: 0.344262 23: 0.344262 24: 0.344262 25: 0.344262 26: 0.344262 27: 0.344262 28: 0.344262 29: 0.344262 30: 0.344262 31: 0.344262 32: 0.344262 33: 0.344262 34: 0.344262 35: 0.344262 36: 0.344262 37: 0.344262 38: 0.344262 39: 0.344262 40: 0.344262 41: 0.344262 42: 0.344262 43: 0.344262 44: 0.344262 45: 0.344262 46: 0.344262 47: 0.344262 48: 0.344262 49: 0.344262\n",
"\tMembers: [11, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 13)\n",
"[INFO] [2016-08-16 07:51:51,132:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.344262\n",
"[INFO] [2016-08-16 07:51:51,133:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.548280 seconds\n",
"[INFO] [2016-08-16 07:51:51,135:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:51:51,160:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 16. configuration. Duration 0.955629; loss 0.786885; status 1; additional run info: ;duration: 0.9556291103363037;num_run:00016 \n",
"[INFO] [2016-08-16 07:51:51,163:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 17. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:51,164:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: liblinear_svc\n",
" classifier:liblinear_svc:C, Value: 16.445846112636396\n",
" classifier:liblinear_svc:dual, Constant: False\n",
" classifier:liblinear_svc:fit_intercept, Constant: True\n",
" classifier:liblinear_svc:intercept_scaling, Constant: 1\n",
" classifier:liblinear_svc:loss, Value: squared_hinge\n",
" classifier:liblinear_svc:multi_class, Constant: ovr\n",
" classifier:liblinear_svc:penalty, Value: l2\n",
" classifier:liblinear_svc:tol, Value: 5.031987397253124e-05\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.018681895366958703\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: nystroem_sampler\n",
" preprocessor:nystroem_sampler:gamma, Value: 0.2781157712712954\n",
" preprocessor:nystroem_sampler:kernel, Value: rbf\n",
" preprocessor:nystroem_sampler:n_components, Value: 8238\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/kernel_approximation.py:463: UserWarning: n_components > n_samples. This is not possible.\n",
"n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n",
" warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:51:52,795:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 17. configuration. Duration 1.530154; loss 0.766393; status 1; additional run info: ;duration: 1.5301539897918701;num_run:00017 \n",
"[INFO] [2016-08-16 07:51:52,799:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 18. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:52,801:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.0018057707469332856\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: squared_hinge\n",
" classifier:passive_aggressive:n_iter, Value: 572\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.17556429026221484\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: nystroem_sampler\n",
" preprocessor:nystroem_sampler:gamma, Value: 0.35884375549922065\n",
" preprocessor:nystroem_sampler:kernel, Value: rbf\n",
" preprocessor:nystroem_sampler:n_components, Value: 481\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run4\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:51:53,158:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:51:53,702:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.319672 1: 0.344262 2: 0.344262 3: 0.344262 4: 0.344262 5: 0.344262 6: 0.344262 7: 0.344262 8: 0.344262 9: 0.344262 10: 0.344262 11: 0.344262 12: 0.344262 13: 0.344262 14: 0.344262 15: 0.344262 16: 0.344262 17: 0.344262 18: 0.344262 19: 0.344262 20: 0.344262 21: 0.344262 22: 0.344262 23: 0.344262 24: 0.344262 25: 0.344262 26: 0.344262 27: 0.344262 28: 0.344262 29: 0.344262 30: 0.344262 31: 0.344262 32: 0.344262 33: 0.344262 34: 0.344262 35: 0.344262 36: 0.344262 37: 0.344262 38: 0.344262 39: 0.344262 40: 0.344262 41: 0.344262 42: 0.344262 43: 0.344262 44: 0.344262 45: 0.344262 46: 0.344262 47: 0.344262 48: 0.344262 49: 0.344262\n",
"\tMembers: [11, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 13)\n",
"[INFO] [2016-08-16 07:51:53,706:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.344262\n",
"[INFO] [2016-08-16 07:51:53,708:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.561095 seconds\n",
"[INFO] [2016-08-16 07:51:53,711:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:51:55,878:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 18. configuration. Duration 3.016266; loss 0.782787; status 1; additional run info: ;duration: 3.016265869140625;num_run:00018 \n",
"[INFO] [2016-08-16 07:51:55,881:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 19. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:55,883:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.11209822960801213\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 3\n",
" classifier:xgradient_boosting:min_child_weight, Value: 14\n",
" classifier:xgradient_boosting:n_estimators, Value: 100\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.6277896866125797\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.1847798648480348\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:51:56,032:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 19. configuration. Duration 0.114962; loss 0.688525; status 1; additional run info: ;duration: 0.11496233940124512;num_run:00019 \n",
"[INFO] [2016-08-16 07:51:56,034:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 20. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:51:56,036:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.0036975653885940544\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: hinge\n",
" classifier:passive_aggressive:n_iter, Value: 326\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.6227804363658538\n",
" preprocessor:kitchen_sinks:n_components, Value: 1821\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run4\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:51:57,729:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:51:58,388:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.319672 1: 0.344262 2: 0.344262 3: 0.344262 4: 0.344262 5: 0.344262 6: 0.344262 7: 0.344262 8: 0.344262 9: 0.344262 10: 0.344262 11: 0.344262 12: 0.344262 13: 0.344262 14: 0.344262 15: 0.344262 16: 0.344262 17: 0.344262 18: 0.344262 19: 0.344262 20: 0.344262 21: 0.344262 22: 0.344262 23: 0.344262 24: 0.344262 25: 0.344262 26: 0.344262 27: 0.344262 28: 0.344262 29: 0.344262 30: 0.344262 31: 0.344262 32: 0.344262 33: 0.344262 34: 0.344262 35: 0.344262 36: 0.344262 37: 0.344262 38: 0.344262 39: 0.344262 40: 0.344262 41: 0.344262 42: 0.344262 43: 0.344262 44: 0.344262 45: 0.344262 46: 0.344262 47: 0.344262 48: 0.344262 49: 0.344262\n",
"\tMembers: [11, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 13)\n",
"[INFO] [2016-08-16 07:51:58,393:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.344262\n",
"[INFO] [2016-08-16 07:51:58,395:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.670073 seconds\n",
"[INFO] [2016-08-16 07:51:58,396:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:52:01,067:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 20. configuration. Duration 4.962999; loss 0.790984; status 1; additional run info: ;duration: 4.962998867034912;num_run:00020 \n",
"[INFO] [2016-08-16 07:52:01,070:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 21. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:01,072:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.4120597789233855\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 4.758121621535983\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 16\n",
" classifier:gradient_boosting:min_samples_split, Value: 6\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.9698657674324143\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.45394720185957155\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 2.9979113303712377\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 5\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 17\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:01,773:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 21. configuration. Duration 0.646324; loss 0.668033; status 1; additional run info: ;duration: 0.6463239192962646;num_run:00021 \n",
"[INFO] [2016-08-16 07:52:01,775:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 22. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:01,776:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 9.284928341343884\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.15973168347756866\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 1.0504384005484382\n",
" preprocessor:kitchen_sinks:n_components, Value: 235\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:02,011:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 22. configuration. Duration 0.173836; loss 0.831967; status 1; additional run info: ;duration: 0.17383575439453125;num_run:00022 \n",
"[INFO] [2016-08-16 07:52:02,014:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 23. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:02,016:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: liblinear_svc\n",
" classifier:liblinear_svc:C, Value: 62.21750028494345\n",
" classifier:liblinear_svc:dual, Constant: False\n",
" classifier:liblinear_svc:fit_intercept, Constant: True\n",
" classifier:liblinear_svc:intercept_scaling, Constant: 1\n",
" classifier:liblinear_svc:loss, Value: squared_hinge\n",
" classifier:liblinear_svc:multi_class, Constant: ovr\n",
" classifier:liblinear_svc:penalty, Value: l2\n",
" classifier:liblinear_svc:tol, Value: 0.0002178180133457927\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.028777961041830738\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"You are already timing task: index_run4\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:02,415:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:02,756:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 23. configuration. Duration 0.708847; loss 0.717213; status 1; additional run info: ;duration: 0.7088468074798584;num_run:00023 \n",
"[INFO] [2016-08-16 07:52:02,759:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 24. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:02,761:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 1.0\n",
" classifier:extra_trees:min_samples_leaf, Value: 1\n",
" classifier:extra_trees:min_samples_split, Value: 2\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 1.0\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 1\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 2\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:03,174:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.348361 2: 0.372951 3: 0.372951 4: 0.372951 5: 0.372951 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.372951 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [19, 17, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.02 0. 0.02 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 19) (1, 21)\n",
"[INFO] [2016-08-16 07:52:03,179:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:03,181:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.769840 seconds\n",
"[INFO] [2016-08-16 07:52:03,184:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (22)!.\n",
"[INFO] [2016-08-16 07:52:03,186:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (22)!\n",
"[INFO] [2016-08-16 07:52:03,188:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 24. configuration. Duration 0.368447; loss 0.819672; status 1; additional run info: ;duration: 0.36844706535339355;num_run:00024 \n",
"[INFO] [2016-08-16 07:52:03,190:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 25. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:03,192:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.08921861937738111\n",
" classifier:adaboost:max_depth, Value: 8\n",
" classifier:adaboost:n_estimators, Value: 473\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.46025920786341173\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[ERROR] [2016-08-16 07:52:03,194:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:04,139:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.348361 2: 0.372951 3: 0.372951 4: 0.372951 5: 0.372951 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.372951 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [19, 17, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.02 0. 0.02 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 19) (1, 21)\n",
"[INFO] [2016-08-16 07:52:04,146:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:04,148:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.958292 seconds\n",
"[INFO] [2016-08-16 07:52:04,150:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:52:07,532:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 25. configuration. Duration 4.270654; loss 0.758197; status 1; additional run info: ;duration: 4.270654201507568;num_run:00025 \n",
"[INFO] [2016-08-16 07:52:07,534:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 26. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:07,536:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.20883213948290555\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 5\n",
" classifier:gradient_boosting:max_features, Value: 3.950232147023257\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 2\n",
" classifier:gradient_boosting:min_samples_split, Value: 12\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.7000722592104036\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.20504937335658277\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run5\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:08,175:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:08,396:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 26. configuration. Duration 0.814675; loss 0.725410; status 1; additional run info: ;duration: 0.8146753311157227;num_run:00026 \n",
"[INFO] [2016-08-16 07:52:08,399:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 27. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:08,401:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 242\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 0.0011339574479631279\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.004889144803037605\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 8\n",
" preprocessor:gem:precond, Value: 0.39344596751061517\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:387: UserWarning: Variables are collinear.\n",
" warnings.warn(\"Variables are collinear.\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:08,526:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 27. configuration. Duration 0.082217; loss 0.758197; status 1; additional run info: ;duration: 0.08221697807312012;num_run:00027 \n",
"[INFO] [2016-08-16 07:52:08,528:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 28. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:08,530:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.09705831458471066\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 7\n",
" classifier:gradient_boosting:max_features, Value: 4.283409813794932\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 16\n",
" classifier:gradient_boosting:min_samples_split, Value: 4\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.9843077531242675\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.1149368954241468\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 11920.402652363826\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.0001606219620686348\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:09,088:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.348361 2: 0.372951 3: 0.372951 4: 0.372951 5: 0.372951 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.372951 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [19, 17, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.02 0. 0.02 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 9) (1, 19) (1, 21)\n",
"[INFO] [2016-08-16 07:52:09,095:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:09,098:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 0.926688 seconds\n",
"[INFO] [2016-08-16 07:52:09,099:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:09,573:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 28. configuration. Duration 0.994685; loss 0.692623; status 1; additional run info: ;duration: 0.9946849346160889;num_run:00028 \n",
"[INFO] [2016-08-16 07:52:09,575:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 29. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:09,576:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 295.4185183083245\n",
" classifier:libsvm_svc:gamma, Value: 0.05098015552849704\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 0.01616175671031427\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 216\n",
" preprocessor:feature_agglomeration:pooling_func, Value: mean\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:09,760:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 29. configuration. Duration 0.153612; loss 0.799180; status 1; additional run info: ;duration: 0.1536116600036621;num_run:00029 \n",
"[INFO] [2016-08-16 07:52:09,762:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 30. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:09,764:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.10595850641686415\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 6\n",
" classifier:gradient_boosting:max_features, Value: 4.073047765066454\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 14\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.3730003562382372\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0007311404576904489\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 1.7764018125840484\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 4\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 4\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:10,817:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 30. configuration. Duration 0.999384; loss 0.692623; status 1; additional run info: ;duration: 0.9993844032287598;num_run:00030 \n",
"[INFO] [2016-08-16 07:52:10,819:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 31. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:10,820:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.714684194470519\n",
" classifier:extra_trees:min_samples_leaf, Value: 1\n",
" classifier:extra_trees:min_samples_split, Value: 9\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.17960116136139503\n",
" preprocessor:select_rates:mode, Value: fwe\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:11,048:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 31. configuration. Duration 0.188849; loss 0.684426; status 1; additional run info: ;duration: 0.18884897232055664;num_run:00031 \n",
"[INFO] [2016-08-16 07:52:11,050:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 32. configuration (meta-learning configuration) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:11,052:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.3779010566365115\n",
" classifier:extra_trees:min_samples_leaf, Value: 2\n",
" classifier:extra_trees:min_samples_split, Value: 4\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.004305770290473698\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.30934089491790895\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run5\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:11,111:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:11,273:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 32. configuration. Duration 0.179694; loss 0.717213; status 1; additional run info: ;duration: 0.17969441413879395;num_run:00032 \n",
"[INFO] [2016-08-16 07:52:11,331:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 26 training points for SMAC.\n",
"[INFO] [2016-08-16 07:52:12,245:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.352459 2: 0.364754 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.364754 7: 0.364754 8: 0.364754 9: 0.364754 10: 0.364754 11: 0.364754 12: 0.364754 13: 0.364754 14: 0.364754 15: 0.364754 16: 0.364754 17: 0.364754 18: 0.364754 19: 0.364754 20: 0.364754 21: 0.364754 22: 0.364754 23: 0.364754 24: 0.364754 25: 0.364754 26: 0.364754 27: 0.364754 28: 0.364754 29: 0.364754 30: 0.364754 31: 0.364754 32: 0.364754 33: 0.364754 34: 0.364754 35: 0.364754 36: 0.364754 37: 0.364754 38: 0.364754 39: 0.364754 40: 0.364754 41: 0.364754 42: 0.364754 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.364754 47: 0.364754 48: 0.364754 49: 0.364754\n",
"\tMembers: [19, 29, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0.02]\n",
"\tIdentifiers: (1, 1) (1, 21) (1, 27) (1, 31)\n",
"[INFO] [2016-08-16 07:52:12,250:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.364754\n",
"[INFO] [2016-08-16 07:52:12,251:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.144620 seconds\n",
"[INFO] [2016-08-16 07:52:12,254:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (31)!.\n",
"[INFO] [2016-08-16 07:52:12,255:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (31)!\n",
"[ERROR] [2016-08-16 07:52:12,262:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:13,454:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.352459 2: 0.364754 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.364754 7: 0.364754 8: 0.364754 9: 0.364754 10: 0.364754 11: 0.364754 12: 0.364754 13: 0.364754 14: 0.364754 15: 0.364754 16: 0.364754 17: 0.364754 18: 0.364754 19: 0.364754 20: 0.364754 21: 0.364754 22: 0.364754 23: 0.364754 24: 0.364754 25: 0.364754 26: 0.364754 27: 0.364754 28: 0.364754 29: 0.364754 30: 0.364754 31: 0.364754 32: 0.364754 33: 0.364754 34: 0.364754 35: 0.364754 36: 0.364754 37: 0.364754 38: 0.364754 39: 0.364754 40: 0.364754 41: 0.364754 42: 0.364754 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.364754 47: 0.364754 48: 0.364754 49: 0.364754\n",
"\tMembers: [19, 29, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0.02 0. ]\n",
"\tIdentifiers: (1, 1) (1, 21) (1, 27) (1, 31)\n",
"[INFO] [2016-08-16 07:52:13,459:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.364754\n",
"[INFO] [2016-08-16 07:52:13,460:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.202093 seconds\n",
"[INFO] [2016-08-16 07:52:13,462:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:52:20,702:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 9.36969 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:52:20,708:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 33. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:20,709:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.412059778923\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 4.75812162154\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 16\n",
" classifier:gradient_boosting:min_samples_split, Value: 6\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 0.969865767432\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.45394720186\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.1\n",
" preprocessor:select_rates:mode, Value: fpr\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:21,271:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 33. configuration. Duration: 0.517077; loss: 0.852459; status 1; additional run info: ;duration: 0.5170767307281494;num_run:00033 \n",
"[INFO] [2016-08-16 07:52:21,278:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 34. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:21,279:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 2.33621210907\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 5\n",
" classifier:random_forest:min_samples_split, Value: 5\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.0252216048729\n",
" preprocessor:select_rates:mode, Value: fpr\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run6\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:21,488:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:21,854:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 34. configuration. Duration: 0.528580; loss: 0.688525; status 1; additional run info: ;duration: 0.5285797119140625;num_run:00034 \n",
"[INFO] [2016-08-16 07:52:21,863:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 35. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:21,865:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.115184186584\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 5\n",
" classifier:gradient_boosting:max_features, Value: 2.96707210199\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 7\n",
" classifier:gradient_boosting:min_samples_split, Value: 17\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 340\n",
" classifier:gradient_boosting:subsample, Value: 0.212660691858\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00983236043669\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:52:22,708:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.352459 2: 0.364754 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.364754 7: 0.364754 8: 0.364754 9: 0.364754 10: 0.364754 11: 0.364754 12: 0.364754 13: 0.364754 14: 0.364754 15: 0.364754 16: 0.364754 17: 0.364754 18: 0.364754 19: 0.364754 20: 0.364754 21: 0.364754 22: 0.364754 23: 0.364754 24: 0.364754 25: 0.364754 26: 0.364754 27: 0.364754 28: 0.364754 29: 0.364754 30: 0.364754 31: 0.364754 32: 0.364754 33: 0.364754 34: 0.364754 35: 0.364754 36: 0.364754 37: 0.364754 38: 0.364754 39: 0.364754 40: 0.364754 41: 0.364754 42: 0.364754 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.364754 47: 0.364754 48: 0.364754 49: 0.364754\n",
"\tMembers: [19, 29, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0.02 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 21) (1, 27) (1, 31)\n",
"[INFO] [2016-08-16 07:52:22,719:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.364754\n",
"[INFO] [2016-08-16 07:52:22,724:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.239119 seconds\n",
"[INFO] [2016-08-16 07:52:22,726:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run6\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:24,739:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:25,919:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.352459 2: 0.364754 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.364754 7: 0.364754 8: 0.364754 9: 0.364754 10: 0.364754 11: 0.364754 12: 0.364754 13: 0.364754 14: 0.364754 15: 0.364754 16: 0.364754 17: 0.364754 18: 0.364754 19: 0.364754 20: 0.364754 21: 0.364754 22: 0.364754 23: 0.364754 24: 0.364754 25: 0.364754 26: 0.364754 27: 0.364754 28: 0.364754 29: 0.364754 30: 0.364754 31: 0.364754 32: 0.364754 33: 0.364754 34: 0.364754 35: 0.364754 36: 0.364754 37: 0.364754 38: 0.364754 39: 0.364754 40: 0.364754 41: 0.364754 42: 0.364754 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.364754 47: 0.364754 48: 0.364754 49: 0.364754\n",
"\tMembers: [19, 29, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0.02 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 21) (1, 27) (1, 31)\n",
"[INFO] [2016-08-16 07:52:25,927:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.364754\n",
"[INFO] [2016-08-16 07:52:25,928:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.193334 seconds\n",
"[INFO] [2016-08-16 07:52:25,930:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:52:27,825:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 35. configuration. Duration: 5.889068; loss: 0.774590; status 1; additional run info: ;duration: 5.889067888259888;num_run:00035 \n",
"[INFO] [2016-08-16 07:52:27,832:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 36. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:27,833:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.041226567609\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 1\n",
" classifier:xgradient_boosting:n_estimators, Value: 363\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.703839251742\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: manhattan\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 17\n",
" preprocessor:feature_agglomeration:pooling_func, Value: mean\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run6\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:27,943:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:28,773:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 36. configuration. Duration: 0.905270; loss: 0.737705; status 1; additional run info: ;duration: 0.9052698612213135;num_run:00036 \n",
"[INFO] [2016-08-16 07:52:28,780:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 37. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:28,782:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.101770112695\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:29,137:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.331967 1: 0.352459 2: 0.364754 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.364754 7: 0.364754 8: 0.364754 9: 0.364754 10: 0.364754 11: 0.364754 12: 0.364754 13: 0.364754 14: 0.364754 15: 0.364754 16: 0.364754 17: 0.364754 18: 0.364754 19: 0.364754 20: 0.364754 21: 0.364754 22: 0.364754 23: 0.364754 24: 0.364754 25: 0.364754 26: 0.364754 27: 0.364754 28: 0.364754 29: 0.364754 30: 0.364754 31: 0.364754 32: 0.364754 33: 0.364754 34: 0.364754 35: 0.364754 36: 0.364754 37: 0.364754 38: 0.364754 39: 0.364754 40: 0.364754 41: 0.364754 42: 0.364754 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.364754 47: 0.364754 48: 0.364754 49: 0.364754\n",
"\tMembers: [19, 29, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.94 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0.02 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 21) (1, 27) (1, 31)\n",
"[INFO] [2016-08-16 07:52:29,145:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.364754\n",
"[INFO] [2016-08-16 07:52:29,148:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.208505 seconds\n",
"[INFO] [2016-08-16 07:52:29,151:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:52:29,367:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 37. configuration. Duration: 0.539897; loss: 0.672131; status 1; additional run info: ;duration: 0.5398967266082764;num_run:00037 \n",
"[INFO] [2016-08-16 07:52:29,374:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 38. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:29,376:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 0.743842913028\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 10\n",
" classifier:decision_tree:min_samples_split, Value: 5\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:52:29,529:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 38. configuration. Duration: 0.124468; loss: 0.823770; status 1; additional run info: ;duration: 0.12446784973144531;num_run:00038 \n",
"[INFO] [2016-08-16 07:52:29,535:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 39. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:29,537:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 5\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0111357313386\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:30,044:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 39. configuration. Duration: 0.463418; loss: 0.668033; status 1; additional run info: ;duration: 0.46341848373413086;num_run:00039 \n",
"[INFO] [2016-08-16 07:52:30,050:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 40. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:30,051:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.0516850893531\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 220\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 57.5873515211\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:52:30,399:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 40. configuration. Duration: 0.305610; loss: 0.663934; status 1; additional run info: ;duration: 0.30560970306396484;num_run:00040 \n",
"[INFO] [2016-08-16 07:52:30,463:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 34 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run6\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:31,165:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:32,628:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.336066 1: 0.356557 2: 0.368852 3: 0.372951 4: 0.372951 5: 0.372951 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.372951 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [38, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.92 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02]\n",
"\tIdentifiers: (1, 1) (1, 14) (1, 40)\n",
"[INFO] [2016-08-16 07:52:32,634:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:32,636:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.474932 seconds\n",
"[INFO] [2016-08-16 07:52:32,639:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (40)!.\n",
"[INFO] [2016-08-16 07:52:32,641:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (40)!\n",
"[INFO] [2016-08-16 07:52:42,285:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 11.821 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:52:42,290:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 41. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:42,292:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0137961211181\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:42,801:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 41. configuration. Duration: 0.466165; loss: 0.688525; status 1; additional run info: ;duration: 0.46616458892822266;num_run:00041 \n",
"[INFO] [2016-08-16 07:52:42,807:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 42. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:42,808:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 4.61536913393e-05\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: squared_hinge\n",
" classifier:passive_aggressive:n_iter, Value: 101\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00067138318249\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 116, in _ica_par\n",
" warnings.warn('FastICA did not converge. Consider increasing '\n",
"UserWarning: FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:44,030:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 42. configuration. Duration: 1.216230; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:52:44,037:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 43. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:44,039:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.1\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 3\n",
" classifier:xgradient_boosting:min_child_weight, Value: 1\n",
" classifier:xgradient_boosting:n_estimators, Value: 100\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:44,152:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 43. configuration. Duration: 0.086444; loss: 0.684426; status 1; additional run info: ;duration: 0.08644366264343262;num_run:00043 \n",
"[INFO] [2016-08-16 07:52:44,157:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 44. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:44,159:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: gini\n",
" classifier:decision_tree:max_depth, Value: 1.94593017712\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 18\n",
" classifier:decision_tree:min_samples_split, Value: 2\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 3.12809311606\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 2\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 16\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:44,352:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 44. configuration. Duration: 0.158649; loss: 0.741803; status 1; additional run info: ;duration: 0.15864944458007812;num_run:00044 \n",
"[INFO] [2016-08-16 07:52:44,357:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 45. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:44,359:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 1.0\n",
" classifier:extra_trees:min_samples_leaf, Value: 1\n",
" classifier:extra_trees:min_samples_split, Value: 2\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:44,586:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 45. configuration. Duration: 0.184005; loss: 0.750000; status 1; additional run info: ;duration: 0.184004545211792;num_run:00045 \n",
"[INFO] [2016-08-16 07:52:44,592:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 46. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:44,593:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 110\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 0.0133662241448\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.415603644849\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[ERROR] [2016-08-16 07:52:44,681:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 116, in _ica_par\n",
" warnings.warn('FastICA did not converge. Consider increasing '\n",
"UserWarning: FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:45,863:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 46. configuration. Duration: 1.265523; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:52:45,869:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 47. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:45,882:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 1.0\n",
" classifier:extra_trees:min_samples_leaf, Value: 1\n",
" classifier:extra_trees:min_samples_split, Value: 2\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:46,159:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 47. configuration. Duration: 0.230003; loss: 0.750000; status 1; additional run info: ;duration: 0.23000264167785645;num_run:00047 \n",
"[INFO] [2016-08-16 07:52:46,166:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 48. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:46,168:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.000472573449469\n",
" classifier:sgd:average, Value: True\n",
" classifier:sgd:eta0, Value: 0.0582594738491\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: optimal\n",
" classifier:sgd:loss, Value: squared_hinge\n",
" classifier:sgd:n_iter, Value: 413\n",
" classifier:sgd:penalty, Value: l2\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.392993679011\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 1581.49831115\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.00427826080249\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:52:46,192:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.336066 1: 0.356557 2: 0.368852 3: 0.372951 4: 0.372951 5: 0.372951 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.372951 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [38, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.92 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 14) (1, 40)\n",
"[INFO] [2016-08-16 07:52:46,197:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:46,199:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.525202 seconds\n",
"[INFO] [2016-08-16 07:52:46,200:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:46,547:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 48. configuration. Duration: 0.351487; loss: 0.795082; status 1; additional run info: ;duration: 0.3514871597290039;num_run:00048 \n",
"[INFO] [2016-08-16 07:52:46,553:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 49. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:46,554:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.1\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 3\n",
" classifier:xgradient_boosting:min_child_weight, Value: 1\n",
" classifier:xgradient_boosting:n_estimators, Value: 100\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:46,670:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 49. configuration. Duration: 0.089138; loss: 0.684426; status 1; additional run info: ;duration: 0.08913803100585938;num_run:00049 \n",
"[INFO] [2016-08-16 07:52:46,675:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 50. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:46,677:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.00015904480464\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.0296347437049\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:l1_ratio, Value: 3.76000156664e-05\n",
" classifier:sgd:learning_rate, Value: invscaling\n",
" classifier:sgd:loss, Value: hinge\n",
" classifier:sgd:n_iter, Value: 41\n",
" classifier:sgd:penalty, Value: elasticnet\n",
" classifier:sgd:power_t, Value: 0.794062565613\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0144083482858\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: nystroem_sampler\n",
" preprocessor:nystroem_sampler:kernel, Value: cosine\n",
" preprocessor:nystroem_sampler:n_components, Value: 1515\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/kernel_approximation.py:463: UserWarning: n_components > n_samples. This is not possible.\n",
"n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n",
" warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:47,742:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 50. configuration. Duration: 0.984060; loss: 0.860656; status 1; additional run info: ;duration: 0.9840602874755859;num_run:00050 \n",
"[INFO] [2016-08-16 07:52:47,750:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 51. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:47,752:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.1\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 3\n",
" classifier:xgradient_boosting:min_child_weight, Value: 1\n",
" classifier:xgradient_boosting:n_estimators, Value: 100\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:47,889:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 51. configuration. Duration: 0.097456; loss: 0.684426; status 1; additional run info: ;duration: 0.09745645523071289;num_run:00051 \n",
"[INFO] [2016-08-16 07:52:47,897:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 52. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:47,899:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: k_nearest_neighbors\n",
" classifier:k_nearest_neighbors:n_neighbors, Value: 20\n",
" classifier:k_nearest_neighbors:p, Value: 1\n",
" classifier:k_nearest_neighbors:weights, Value: uniform\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:52:47,956:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 52. configuration. Duration: 0.024106; loss: 0.692623; status 1; additional run info: ;duration: 0.024105548858642578;num_run:00052 \n",
"[INFO] [2016-08-16 07:52:47,962:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 53. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:47,963:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 1.67205021017\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 8\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 6\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"You are already timing task: index_run7\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:48,214:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:48,634:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 53. configuration. Duration: 0.610161; loss: 0.700820; status 1; additional run info: ;duration: 0.610161304473877;num_run:00053 \n",
"[INFO] [2016-08-16 07:52:48,641:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 54. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:48,642:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.030551257175\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 9\n",
" classifier:xgradient_boosting:min_child_weight, Value: 20\n",
" classifier:xgradient_boosting:n_estimators, Value: 467\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.563343199931\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: pca\n",
" preprocessor:pca:keep_variance, Value: 0.515451906206\n",
" preprocessor:pca:whiten, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:52:49,113:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 54. configuration. Duration: 0.426763; loss: 0.782787; status 1; additional run info: ;duration: 0.42676329612731934;num_run:00054 \n",
"[INFO] [2016-08-16 07:52:49,125:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 55. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:49,127:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 1.0\n",
" classifier:extra_trees:min_samples_leaf, Value: 1\n",
" classifier:extra_trees:min_samples_split, Value: 2\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000719988233049\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 3.79481172531\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 1\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 2\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:49,628:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 55. configuration. Duration: 0.426129; loss: 0.717213; status 1; additional run info: ;duration: 0.4261291027069092;num_run:00055 \n",
"[INFO] [2016-08-16 07:52:49,635:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 56. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:49,636:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 4.53735847098\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 9\n",
" classifier:random_forest:min_samples_split, Value: 11\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00264323269243\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 11\n",
" preprocessor:gem:precond, Value: 0.441382077675\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:52:50,094:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.336066 1: 0.356557 2: 0.368852 3: 0.372951 4: 0.372951 5: 0.372951 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.372951 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [38, 12, 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.92 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 1) (1, 14) (1, 40)\n",
"[INFO] [2016-08-16 07:52:50,100:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:50,102:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.891642 seconds\n",
"[INFO] [2016-08-16 07:52:50,104:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:52:50,784:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 56. configuration. Duration: 1.112659; loss: 0.684426; status 1; additional run info: ;duration: 1.1126594543457031;num_run:00056 \n",
"[INFO] [2016-08-16 07:52:50,790:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 57. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:50,791:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 3.77509988997\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 1\n",
" classifier:random_forest:min_samples_split, Value: 4\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:52:51,076:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 57. configuration. Duration: 0.248880; loss: 0.737705; status 1; additional run info: ;duration: 0.24887990951538086;num_run:00057 \n",
"[INFO] [2016-08-16 07:52:51,082:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 58. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:51,083:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 0.0384013091944\n",
" classifier:libsvm_svc:gamma, Value: 0.543655430274\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: True\n",
" classifier:libsvm_svc:tol, Value: 0.00478209894787\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000140331374358\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: pca\n",
" preprocessor:pca:keep_variance, Value: 0.95292659874\n",
" preprocessor:pca:whiten, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:51,162:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 58. configuration. Duration: 0.050980; loss: 0.860656; status 1; additional run info: ;duration: 0.050980329513549805;num_run:00058 \n",
"[INFO] [2016-08-16 07:52:51,168:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 59. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:51,169:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 0.680982525885\n",
" classifier:extra_trees:min_samples_leaf, Value: 2\n",
" classifier:extra_trees:min_samples_split, Value: 16\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00335999889804\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 0.511421619119\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 20\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 11\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:51,515:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 59. configuration. Duration: 0.305785; loss: 0.811475; status 1; additional run info: ;duration: 0.3057849407196045;num_run:00059 \n",
"[INFO] [2016-08-16 07:52:51,521:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 60. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:51,522:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 4.37570629591\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00051743423577\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:51,623:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 60. configuration. Duration: 0.068121; loss: 0.905738; status 1; additional run info: ;duration: 0.06812095642089844;num_run:00060 \n",
"[INFO] [2016-08-16 07:52:51,629:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 61. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:51,630:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 4.04339655244\n",
" classifier:extra_trees:min_samples_leaf, Value: 12\n",
" classifier:extra_trees:min_samples_split, Value: 12\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 55.3443415821\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:52:51,814:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 61. configuration. Duration: 0.148920; loss: 0.700820; status 1; additional run info: ;duration: 0.14892029762268066;num_run:00061 \n",
"[INFO] [2016-08-16 07:52:51,820:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 62. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:51,821:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: False\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.07294123924\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 12\n",
" classifier:random_forest:min_samples_split, Value: 18\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:gamma, Value: 6.69631373117e-05\n",
" preprocessor:kernel_pca:kernel, Value: rbf\n",
" preprocessor:kernel_pca:n_components, Value: 446\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run7\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:52:52,120:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:53,016:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 62. configuration. Duration: 1.086819; loss: 0.856557; status 1; additional run info: ;duration: 1.0868191719055176;num_run:00062 \n",
"[INFO] [2016-08-16 07:52:53,024:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 63. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:53,026:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.51750461197\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 13\n",
" classifier:random_forest:min_samples_split, Value: 6\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.250169099735\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 88.0287717766\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:52:53,237:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 63. configuration. Duration: 0.174713; loss: 0.688525; status 1; additional run info: ;duration: 0.1747128963470459;num_run:00063 \n",
"[INFO] [2016-08-16 07:52:53,243:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 64. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:53,244:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 0.0227063685249\n",
" classifier:bernoulli_nb:fit_prior, Value: True\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.294260475445\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 200\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:52:53,289:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 64. configuration. Duration: 0.015303; loss: 0.811475; status 1; additional run info: ;duration: 0.01530313491821289;num_run:00064 \n",
"[INFO] [2016-08-16 07:52:53,295:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 65. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:53,297:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 2.22043699158\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 4\n",
" classifier:random_forest:min_samples_split, Value: 15\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.000557694994496\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:52:53,547:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 65. configuration. Duration: 0.212155; loss: 0.676230; status 1; additional run info: ;duration: 0.21215486526489258;num_run:00065 \n",
"[INFO] [2016-08-16 07:52:53,554:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 66. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:52:53,556:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 39\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 4.21779542004e-05\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.132885184135\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.668836501037\n",
" preprocessor:kitchen_sinks:n_components, Value: 1249\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:387: UserWarning: Variables are collinear.\n",
" warnings.warn(\"Variables are collinear.\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:52:54,647:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 66. configuration. Duration: 1.029061; loss: 0.971311; status 1; additional run info: ;duration: 1.0290608406066895;num_run:00066 \n",
"[INFO] [2016-08-16 07:52:54,768:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 55 training points for SMAC.\n",
"[INFO] [2016-08-16 07:52:54,835:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.336066 1: 0.356557 2: 0.368852 3: 0.372951 4: 0.360656 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.377049 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.377049 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.372951\n",
"\tMembers: [33, 8, 8, 8, 8, 33, 8, 8, 8, 33, 8, 8, 8, 8, 33, 8, 8, 8, 33, 8, 8, 8, 8, 33, 8, 8, 8, 8, 33, 8, 8, 8, 33, 8, 8, 8, 8, 33, 8, 8, 33, 8, 8, 8, 33, 8, 8, 8, 33, 8]\n",
"\tWeights: [ 0. 0. 0. 0. 0. 0. 0. 0. 0.76 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.24 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 40)\n",
"[INFO] [2016-08-16 07:52:54,842:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:54,844:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.727591 seconds\n",
"[INFO] [2016-08-16 07:52:54,847:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (59)!.\n",
"[INFO] [2016-08-16 07:52:54,849:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (59)!\n",
"[ERROR] [2016-08-16 07:52:54,860:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:52:56,899:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.336066 1: 0.356557 2: 0.368852 3: 0.372951 4: 0.360656 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.377049 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.377049 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.372951\n",
"\tMembers: [30, 8, 8, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 30, 8]\n",
"\tWeights: [ 0. 0. 0. 0. 0. 0. 0. 0. 0.76 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.24 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 40)\n",
"[INFO] [2016-08-16 07:52:56,905:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:52:56,907:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.053093 seconds\n",
"[INFO] [2016-08-16 07:52:56,909:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:53:05,719:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 10.95 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:53:05,725:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 67. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:05,726:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 9\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000235729688366\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:06,255:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 67. configuration. Duration: 0.484348; loss: 0.668033; status 1; additional run info: ;duration: 0.4843475818634033;num_run:00067 \n",
"[INFO] [2016-08-16 07:53:06,262:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 68. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:06,263:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0264166532664\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 9\n",
" classifier:xgradient_boosting:min_child_weight, Value: 6\n",
" classifier:xgradient_boosting:n_estimators, Value: 393\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.786710468821\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000488079067566\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 123\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run8\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:53:06,944:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:53:07,183:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 68. configuration. Duration: 0.887842; loss: 0.672131; status 1; additional run info: ;duration: 0.8878421783447266;num_run:00068 \n",
"[INFO] [2016-08-16 07:53:07,189:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 69. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:07,190:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 57.5873515211\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:07,719:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 69. configuration. Duration: 0.483470; loss: 0.663934; status 1; additional run info: ;duration: 0.48346996307373047;num_run:00069 \n",
"[INFO] [2016-08-16 07:53:07,726:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 70. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:07,727:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0437093472947\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 7\n",
" classifier:xgradient_boosting:min_child_weight, Value: 4\n",
" classifier:xgradient_boosting:n_estimators, Value: 149\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.693167152734\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00446346881981\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 281\n",
" preprocessor:feature_agglomeration:pooling_func, Value: mean\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:53:08,284:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 70. configuration. Duration: 0.520678; loss: 0.721311; status 1; additional run info: ;duration: 0.5206782817840576;num_run:00070 \n",
"[INFO] [2016-08-16 07:53:08,291:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 71. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:08,292:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:08,841:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.336066 1: 0.356557 2: 0.368852 3: 0.372951 4: 0.360656 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.372951 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.372951 39: 0.372951 40: 0.377049 41: 0.372951 42: 0.372951 43: 0.372951 44: 0.377049 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.372951\n",
"\tMembers: [30, 8, 8, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 8, 30, 8, 8, 30, 8, 8, 8, 30, 8, 8, 8, 30, 8]\n",
"\tWeights: [ 0. 0. 0. 0. 0. 0. 0. 0. 0.76 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.24 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 40)\n",
"[INFO] [2016-08-16 07:53:08,846:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:53:08,848:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.907761 seconds\n",
"[INFO] [2016-08-16 07:53:08,849:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:53:08,883:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 71. configuration. Duration: 0.545248; loss: 0.659836; status 1; additional run info: ;duration: 0.5452477931976318;num_run:00071 \n",
"[INFO] [2016-08-16 07:53:08,889:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 72. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:08,891:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 6.27815783649e-06\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.0817613409425\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: invscaling\n",
" classifier:sgd:loss, Value: log\n",
" classifier:sgd:n_iter, Value: 21\n",
" classifier:sgd:penalty, Value: l2\n",
" classifier:sgd:power_t, Value: 0.180958599887\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.142269722722\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.908666044949\n",
" preprocessor:kitchen_sinks:n_components, Value: 158\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:09,096:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 72. configuration. Duration: 0.132915; loss: 0.897541; status 1; additional run info: ;duration: 0.13291549682617188;num_run:00072 \n",
"[INFO] [2016-08-16 07:53:09,102:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 73. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:09,104:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 50.0\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:09,600:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 73. configuration. Duration: 0.451949; loss: 0.668033; status 1; additional run info: ;duration: 0.451948881149292;num_run:00073 \n",
"[INFO] [2016-08-16 07:53:09,606:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 74. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:09,607:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 9.24649151176\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00110056298555\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 1243\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:53:10,148:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 74. configuration. Duration: 0.454872; loss: 1.073770; status 1; additional run info: ;duration: 0.45487165451049805;num_run:00074 \n",
"[INFO] [2016-08-16 07:53:10,155:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 75. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:10,157:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.0\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 1\n",
" classifier:random_forest:min_samples_split, Value: 2\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 50.0\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:10,393:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 75. configuration. Duration: 0.196081; loss: 0.782787; status 1; additional run info: ;duration: 0.19608092308044434;num_run:00075 \n",
"[INFO] [2016-08-16 07:53:10,400:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 76. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:10,401:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00517965246807\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:53:10,444:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 76. configuration. Duration: 0.013873; loss: 0.823770; status 1; additional run info: ;duration: 0.013872861862182617;num_run:00076 \n",
"[INFO] [2016-08-16 07:53:10,451:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 77. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:10,452:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.0\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 1\n",
" classifier:random_forest:min_samples_split, Value: 2\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0198498056687\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 50.0\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:10,686:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 77. configuration. Duration: 0.194153; loss: 0.782787; status 1; additional run info: ;duration: 0.1941533088684082;num_run:00077 \n",
"[INFO] [2016-08-16 07:53:10,692:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 78. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:10,694:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 123\n",
" classifier:lda:shrinkage, Value: auto\n",
" classifier:lda:tol, Value: 0.000155721537166\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000178050638743\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.646134432482\n",
" preprocessor:kitchen_sinks:n_components, Value: 3675\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run8\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:53:10,864:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:53:10,938:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:53:14,690:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.340164 1: 0.352459 2: 0.360656 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.368852 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.368852 11: 0.368852 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.368852 20: 0.368852 21: 0.364754 22: 0.364754 23: 0.360656 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.377049 49: 0.377049\n",
"\tMembers: [48, 42, 13, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 49, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 27]\n",
"\tWeights: [ 0. 0.02 0. 0. 0. 0. 0. 0.02 0.86 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0.02 0.02]\n",
"\tIdentifiers: (1, 6) (1, 13) (1, 14) (1, 21) (1, 40) (1, 63) (1, 71) (1, 73)\n",
"[INFO] [2016-08-16 07:53:14,710:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.377049\n",
"[INFO] [2016-08-16 07:53:14,716:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 3.856121 seconds\n",
"[INFO] [2016-08-16 07:53:14,724:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (75)!.\n",
"[INFO] [2016-08-16 07:53:14,732:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (75)!\n",
"[INFO] [2016-08-16 07:53:39,741:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 78. configuration. Duration: 28.869441; loss: 0.860656; status 1; additional run info: ;duration: 28.86944079399109;num_run:00078 \n",
"[INFO] [2016-08-16 07:53:39,905:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 67 training points for SMAC.\n",
"[ERROR] [2016-08-16 07:53:40,863:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:53:40,925:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:53:42,810:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.340164 1: 0.352459 2: 0.360656 3: 0.364754 4: 0.364754 5: 0.364754 6: 0.368852 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.368852 11: 0.368852 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.368852 20: 0.368852 21: 0.364754 22: 0.364754 23: 0.360656 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.377049 49: 0.377049\n",
"\tMembers: [48, 42, 13, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 49, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 27]\n",
"\tWeights: [ 0. 0.02 0. 0. 0. 0. 0. 0.02 0.86 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0.02 0.02]\n",
"\tIdentifiers: (1, 6) (1, 13) (1, 14) (1, 21) (1, 40) (1, 63) (1, 71) (1, 73)\n",
"[INFO] [2016-08-16 07:53:42,816:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.377049\n",
"[INFO] [2016-08-16 07:53:42,817:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.959399 seconds\n",
"[INFO] [2016-08-16 07:53:42,819:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:53:52,008:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 12.1017 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:53:52,014:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 79. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:52,015:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:52,541:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 79. configuration. Duration: 0.482316; loss: 0.647541; status 1; additional run info: ;duration: 0.4823164939880371;num_run:00079 \n",
"[INFO] [2016-08-16 07:53:52,546:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 80. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:52,547:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 468.293592608\n",
" classifier:libsvm_svc:gamma, Value: 0.959239406041\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 0.00731388567194\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 3.11868949584\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 12\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 13\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:53:52,802:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 80. configuration. Duration: 0.216865; loss: 0.815574; status 1; additional run info: ;duration: 0.21686458587646484;num_run:00080 \n",
"[INFO] [2016-08-16 07:53:52,808:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 81. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:52,810:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run9\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:53:52,866:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:53:52,915:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:53:53,408:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 81. configuration. Duration: 0.550602; loss: 0.659836; status 1; additional run info: ;duration: 0.5506021976470947;num_run:00081 \n",
"[INFO] [2016-08-16 07:53:53,415:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 82. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:53,417:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0109297642034\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 1\n",
" classifier:xgradient_boosting:min_child_weight, Value: 20\n",
" classifier:xgradient_boosting:n_estimators, Value: 284\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.739293931635\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00567671491912\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: pca\n",
" preprocessor:pca:keep_variance, Value: 0.955730415909\n",
" preprocessor:pca:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:53,623:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 82. configuration. Duration: 0.174173; loss: 0.704918; status 1; additional run info: ;duration: 0.17417311668395996;num_run:00082 \n",
"[INFO] [2016-08-16 07:53:53,629:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 83. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:53,631:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:54,264:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 83. configuration. Duration: 0.586940; loss: 0.647541; status 1; additional run info: ;duration: 0.5869402885437012;num_run:00083 \n",
"[INFO] [2016-08-16 07:53:54,270:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 84. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:54,272:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 89.7020369787\n",
" classifier:bernoulli_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000346762765191\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:53:54,346:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 84. configuration. Duration: 0.034073; loss: 0.897541; status 1; additional run info: ;duration: 0.0340728759765625;num_run:00084 \n",
"[INFO] [2016-08-16 07:53:54,355:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 85. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:54,357:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:54,870:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.377049 14: 0.381148 15: 0.389344 16: 0.393443 17: 0.393443 18: 0.393443 19: 0.393443 20: 0.389344 21: 0.389344 22: 0.385246 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.381148 28: 0.381148 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.377049 36: 0.377049 37: 0.377049 38: 0.385246 39: 0.389344 40: 0.389344 41: 0.389344 42: 0.389344 43: 0.389344 44: 0.389344 45: 0.389344 46: 0.389344 47: 0.389344 48: 0.389344 49: 0.389344\n",
"\tMembers: [49, 26, 41, 12, 26, 26, 8, 8, 8, 8, 8, 8, 8, 16, 26, 26, 26, 8, 8, 26, 8, 8, 8, 26, 26, 26, 42, 1, 8, 26, 26, 25, 8, 8, 8, 3, 8, 18, 45, 48, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8]\n",
"\tWeights: [ 0. 0.02 0. 0.02 0. 0. 0. 0. 0.54 0. 0. 0.\n",
" 0.02 0. 0. 0. 0.02 0. 0.02 0. 0. 0. 0. 0. 0.\n",
" 0.02 0.24 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.02 0.02 0. 0. 0.02 0. 0. 0.02 0.02]\n",
"\tIdentifiers: (1, 6) (1, 8) (1, 14) (1, 21) (1, 27) (1, 30) (1, 39) (1, 40) (1, 63) (1, 65) (1, 69) (1, 73) (1, 79)\n",
"[INFO] [2016-08-16 07:53:54,876:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.389344\n",
"[INFO] [2016-08-16 07:53:54,878:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.016004 seconds\n",
"[INFO] [2016-08-16 07:53:54,881:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (78)!.\n",
"[INFO] [2016-08-16 07:53:54,884:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (78)!\n",
"[ERROR] [2016-08-16 07:53:54,892:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:53:54,961:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:53:54,978:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 85. configuration. Duration: 0.567355; loss: 0.647541; status 1; additional run info: ;duration: 0.5673549175262451;num_run:00085 \n",
"[INFO] [2016-08-16 07:53:54,985:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 86. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:54,986:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 1.16783380991\n",
" classifier:bernoulli_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 116, in _ica_par\n",
" warnings.warn('FastICA did not converge. Consider increasing '\n",
"UserWarning: FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:53:56,244:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 86. configuration. Duration: 1.252481; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:53:56,250:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 87. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:56,252:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.125602291896\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:53:56,796:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 87. configuration. Duration: 0.498615; loss: 0.647541; status 1; additional run info: ;duration: 0.4986145496368408;num_run:00087 \n",
"[INFO] [2016-08-16 07:53:56,802:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 88. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:53:56,802:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.368852 29: 0.368852 30: 0.364754 31: 0.377049 32: 0.385246 33: 0.389344 34: 0.389344 35: 0.389344 36: 0.389344 37: 0.389344 38: 0.389344 39: 0.385246 40: 0.385246 41: 0.385246 42: 0.385246 43: 0.385246 44: 0.385246 45: 0.385246 46: 0.385246 47: 0.385246 48: 0.385246 49: 0.385246\n",
"\tMembers: [46, 24, 38, 11, 24, 24, 7, 7, 7, 7, 48, 7, 7, 7, 7, 24, 24, 24, 7, 7, 7, 13, 7, 7, 7, 7, 7, 7, 7, 24, 11, 38, 48, 18, 7, 7, 7, 7, 24, 7, 7, 7, 7, 7, 7, 7, 24, 48, 7, 7]\n",
"\tWeights: [ 0. 0. 0. 0. 0. 0. 0. 0.62 0. 0. 0. 0.04\n",
" 0. 0.02 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.18 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0.04 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.06\n",
" 0. ]\n",
"\tIdentifiers: (1, 14) (1, 21) (1, 25) (1, 31) (1, 40) (1, 63) (1, 79) (1, 82)[INFO] [2016-08-16 07:53:56,804:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.80143446159\n",
" classifier:adaboost:max_depth, Value: 5\n",
" classifier:adaboost:n_estimators, Value: 194\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:gamma, Value: 0.00790127386495\n",
" preprocessor:kernel_pca:kernel, Value: rbf\n",
" preprocessor:kernel_pca:n_components, Value: 152\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"\n",
"[INFO] [2016-08-16 07:53:56,809:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.385246\n",
"[INFO] [2016-08-16 07:53:56,810:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.922933 seconds\n",
"[INFO] [2016-08-16 07:53:56,813:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (82)!.\n",
"[INFO] [2016-08-16 07:53:56,816:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (82)!\n",
"[ERROR] [2016-08-16 07:53:56,825:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:53:56,949:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:53:59,005:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.385246 26: 0.385246 27: 0.385246 28: 0.385246 29: 0.385246 30: 0.385246 31: 0.385246 32: 0.385246 33: 0.385246 34: 0.385246 35: 0.385246 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.377049 43: 0.377049 44: 0.377049 45: 0.377049 46: 0.377049 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [44, 22, 36, 10, 22, 22, 7, 7, 7, 7, 46, 7, 7, 7, 7, 22, 22, 22, 7, 7, 7, 7, 44, 2, 35, 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, 22, 7, 7, 22, 7, 7, 7, 7, 7, 7, 7, 21, 2, 7, 7]\n",
"\tWeights: [ 0. 0. 0.04 0. 0. 0. 0. 0.64 0. 0. 0.02 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0.02 0.16 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0.02 0. 0.\n",
" 0. 0. 0. 0. 0. 0.04 0. 0.02 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 21) (1, 27) (1, 39) (1, 40) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:53:59,013:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:53:59,014:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.193091 seconds\n",
"[INFO] [2016-08-16 07:53:59,017:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (84)!.\n",
"[INFO] [2016-08-16 07:53:59,019:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (84)!\n",
"[INFO] [2016-08-16 07:54:07,598:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 88. configuration. Duration: 10.672352; loss: 0.799180; status 1; additional run info: ;duration: 10.672351837158203;num_run:00088 \n",
"[INFO] [2016-08-16 07:54:07,763:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 74 training points for SMAC.\n",
"[ERROR] [2016-08-16 07:54:09,057:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:54:09,112:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:54:11,098:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.385246 26: 0.385246 27: 0.385246 28: 0.385246 29: 0.385246 30: 0.385246 31: 0.385246 32: 0.385246 33: 0.385246 34: 0.385246 35: 0.385246 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.377049 43: 0.377049 44: 0.377049 45: 0.377049 46: 0.377049 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [44, 22, 36, 10, 22, 22, 7, 7, 7, 7, 46, 7, 7, 7, 7, 22, 22, 22, 7, 7, 7, 7, 44, 2, 35, 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, 22, 7, 7, 22, 7, 7, 7, 7, 7, 7, 7, 21, 2, 7, 7]\n",
"\tWeights: [ 0. 0. 0.04 0. 0. 0. 0. 0.64 0. 0. 0.02 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0.02 0.16 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0.02 0. 0.\n",
" 0. 0. 0. 0. 0. 0.04 0. 0.02 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 21) (1, 27) (1, 39) (1, 40) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:11,105:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:11,107:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.053967 seconds\n",
"[INFO] [2016-08-16 07:54:11,109:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:54:20,040:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 12.2755 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:54:20,045:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 89. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:20,047:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:20,526:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 89. configuration. Duration: 0.436658; loss: 0.647541; status 1; additional run info: ;duration: 0.4366579055786133;num_run:00089 \n",
"[INFO] [2016-08-16 07:54:20,531:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 90. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:20,533:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 207\n",
" classifier:lda:shrinkage, Value: manual\n",
" classifier:lda:shrinkage_factor, Value: 0.497327276651\n",
" classifier:lda:tol, Value: 1.02547648296e-05\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.270933580466\n",
" preprocessor:select_rates:mode, Value: fpr\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:54:20,580:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 90. configuration. Duration: 0.020699; loss: 0.827869; status 1; additional run info: ;duration: 0.020699024200439453;num_run:00090 \n",
"[INFO] [2016-08-16 07:54:20,586:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 91. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:20,587:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:21,090:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 91. configuration. Duration: 0.462179; loss: 0.647541; status 1; additional run info: ;duration: 0.46217918395996094;num_run:00091 \n",
"[INFO] [2016-08-16 07:54:21,096:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 92. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:21,097:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 2245.7923256\n",
" classifier:libsvm_svc:gamma, Value: 0.000112257171296\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 5.99568531548e-05\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 2\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 13\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 14\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 65\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run12\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:54:21,147:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:54:21,205:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:54:21,591:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 92. configuration. Duration: 0.456091; loss: 0.713115; status 1; additional run info: ;duration: 0.45609068870544434;num_run:00092 \n",
"[INFO] [2016-08-16 07:54:21,596:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 93. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:21,599:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:22,233:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 93. configuration. Duration: 0.586157; loss: 0.647541; status 1; additional run info: ;duration: 0.5861566066741943;num_run:00093 \n",
"[INFO] [2016-08-16 07:54:22,241:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 94. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:22,242:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.13964151757\n",
" classifier:extra_trees:min_samples_leaf, Value: 1\n",
" classifier:extra_trees:min_samples_split, Value: 15\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:coef0, Value: 0.696928506788\n",
" preprocessor:kernel_pca:kernel, Value: sigmoid\n",
" preprocessor:kernel_pca:n_components, Value: 1652\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:54:23,281:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 94. configuration. Duration: 0.953923; loss: 0.971311; status 1; additional run info: ;duration: 0.953923225402832;num_run:00094 \n",
"[INFO] [2016-08-16 07:54:23,289:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 95. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:23,291:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:23,498:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [42, 20, 34, 9, 20, 20, 6, 6, 6, 6, 44, 6, 6, 6, 6, 20, 20, 20, 6, 6, 6, 6, 42, 2, 33, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 42, 6, 6, 6, 6, 6, 44, 8, 28]\n",
"\tWeights: [ 0. 0. 0.02 0. 0. 0. 0.66 0. 0.02 0.02 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.12 0. 0. 0. 0. 0.\n",
" 0. 0. 0.02 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0.\n",
" 0. 0. 0. 0.06 0. 0.04 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:23,505:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:23,507:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.363186 seconds\n",
"[INFO] [2016-08-16 07:54:23,509:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (88)!.\n",
"[INFO] [2016-08-16 07:54:23,510:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (88)!\n",
"[ERROR] [2016-08-16 07:54:23,518:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:54:23,577:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:54:23,874:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 95. configuration. Duration: 0.526535; loss: 0.647541; status 1; additional run info: ;duration: 0.5265350341796875;num_run:00095 \n",
"[INFO] [2016-08-16 07:54:23,882:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 96. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:23,884:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.369195645178\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 3\n",
" classifier:xgradient_boosting:min_child_weight, Value: 4\n",
" classifier:xgradient_boosting:n_estimators, Value: 383\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.474904422007\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000178367917347\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:54:24,457:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 96. configuration. Duration: 0.541717; loss: 0.790984; status 1; additional run info: ;duration: 0.5417165756225586;num_run:00096 \n",
"[INFO] [2016-08-16 07:54:24,463:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 97. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:24,464:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:25,011:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 97. configuration. Duration: 0.502587; loss: 0.647541; status 1; additional run info: ;duration: 0.5025866031646729;num_run:00097 \n",
"[INFO] [2016-08-16 07:54:25,017:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 98. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:25,019:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 165\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 0.00461882611568\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0033602023141\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.412584165757\n",
" preprocessor:select_rates:mode, Value: fwe\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:54:25,070:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 98. configuration. Duration: 0.022011; loss: 0.844262; status 1; additional run info: ;duration: 0.02201080322265625;num_run:00098 \n",
"[INFO] [2016-08-16 07:54:25,077:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 99. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:25,079:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:25,422:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [40, 19, 32, 8, 19, 19, 5, 5, 5, 5, 42, 5, 5, 5, 5, 19, 19, 19, 5, 5, 5, 5, 40, 2, 31, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 40, 5, 5, 5, 5, 5, 42, 7, 26]\n",
"\tWeights: [ 0. 0. 0.02 0. 0. 0.66 0. 0.02 0.02 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.12 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0. 0. 0.\n",
" 0. 0.06 0. 0.04 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:25,430:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:25,432:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.918160 seconds\n",
"[INFO] [2016-08-16 07:54:25,434:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:54:25,689:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 99. configuration. Duration: 0.564104; loss: 0.647541; status 1; additional run info: ;duration: 0.5641043186187744;num_run:00099 \n",
"[INFO] [2016-08-16 07:54:25,695:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 100. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:25,697:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.00295770524785\n",
" classifier:sgd:average, Value: True\n",
" classifier:sgd:eta0, Value: 0.0791358187109\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: optimal\n",
" classifier:sgd:loss, Value: squared_hinge\n",
" classifier:sgd:n_iter, Value: 253\n",
" classifier:sgd:penalty, Value: l2\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 15\n",
" preprocessor:gem:precond, Value: 0.0130724105216\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:54:26,132:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 100. configuration. Duration: 0.404355; loss: 0.758197; status 1; additional run info: ;duration: 0.4043548107147217;num_run:00100 \n",
"[INFO] [2016-08-16 07:54:26,138:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 101. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:26,140:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:26,726:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 101. configuration. Duration: 0.539549; loss: 0.647541; status 1; additional run info: ;duration: 0.5395493507385254;num_run:00101 \n",
"[INFO] [2016-08-16 07:54:26,733:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 102. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:26,734:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: multinomial_nb\n",
" classifier:multinomial_nb:alpha, Value: 1.57793623654\n",
" classifier:multinomial_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:54:26,781:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 102. configuration. Duration: 0.016250; loss: 0.844262; status 1; additional run info: ;duration: 0.0162503719329834;num_run:00102 \n",
"[INFO] [2016-08-16 07:54:26,788:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 103. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:26,789:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:27,347:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 103. configuration. Duration: 0.512883; loss: 0.647541; status 1; additional run info: ;duration: 0.5128834247589111;num_run:00103 \n",
"[INFO] [2016-08-16 07:54:27,354:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 104. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:27,356:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.000696549061563\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.0359119060582\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: constant\n",
" classifier:sgd:loss, Value: log\n",
" classifier:sgd:n_iter, Value: 786\n",
" classifier:sgd:penalty, Value: l2\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:54:27,448:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:54:27,506:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:54:28,110:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 104. configuration. Duration: 0.723044; loss: 0.860656; status 1; additional run info: ;duration: 0.7230443954467773;num_run:00104 \n",
"[INFO] [2016-08-16 07:54:28,117:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 105. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:28,120:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.754722385843\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 5\n",
" classifier:gradient_boosting:max_features, Value: 4.76296464731\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 7\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 241\n",
" classifier:gradient_boosting:subsample, Value: 0.485710310047\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.41950170311\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 77.8585470502\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:29,323:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [35, 17, 27, 7, 17, 17, 4, 4, 4, 4, 37, 4, 4, 4, 4, 17, 17, 17, 4, 4, 4, 4, 35, 1, 26, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 35, 4, 4, 4, 4, 4, 37, 6, 22]\n",
"\tWeights: [ 0. 0.02 0. 0. 0.66 0. 0.02 0.02 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.12 0. 0. 0. 0. 0.02 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0.06 0. 0.04\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:29,332:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:29,334:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.889778 seconds\n",
"[INFO] [2016-08-16 07:54:29,336:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:54:31,350:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:54:31,409:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:54:31,452:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 105. configuration. Duration: 3.262542; loss: 0.950820; status 1; additional run info: ;duration: 3.2625417709350586;num_run:00105 \n",
"[INFO] [2016-08-16 07:54:31,459:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 106. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:31,461:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.18850827931\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 3\n",
" classifier:random_forest:min_samples_split, Value: 17\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 16\n",
" preprocessor:gem:precond, Value: 0.0521570323336\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:54:32,014:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 106. configuration. Duration: 0.512922; loss: 0.704918; status 1; additional run info: ;duration: 0.5129220485687256;num_run:00106 \n",
"[INFO] [2016-08-16 07:54:32,021:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 107. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:32,024:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.251753634483\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 17\n",
" classifier:xgradient_boosting:n_estimators, Value: 349\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.892626970885\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 89.9904282829\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:54:32,583:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 107. configuration. Duration: 0.525461; loss: 0.795082; status 1; additional run info: ;duration: 0.525460958480835;num_run:00107 \n",
"[INFO] [2016-08-16 07:54:32,763:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 85 training points for SMAC.\n",
"[INFO] [2016-08-16 07:54:33,274:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [35, 17, 27, 7, 17, 17, 4, 4, 4, 4, 37, 4, 4, 4, 4, 17, 17, 17, 4, 4, 4, 4, 35, 1, 26, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 35, 4, 4, 4, 4, 4, 37, 6, 22]\n",
"\tWeights: [ 0. 0.02 0. 0. 0.66 0. 0.02 0.02 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.12 0. 0. 0. 0. 0.02 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0.06 0. 0.04\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:33,280:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:33,281:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.935196 seconds\n",
"[INFO] [2016-08-16 07:54:33,283:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:54:35,299:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:54:35,358:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:54:37,285:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [34, 16, 26, 6, 16, 16, 3, 3, 3, 3, 36, 3, 3, 3, 3, 16, 16, 16, 3, 3, 3, 3, 34, 0, 25, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 34, 3, 3, 3, 3, 3, 36, 5, 21]\n",
"\tWeights: [ 0.02 0. 0. 0.66 0. 0.02 0.02 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.12 0. 0. 0. 0. 0.02 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0.06 0. 0.04\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:37,293:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:37,295:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.000303 seconds\n",
"[INFO] [2016-08-16 07:54:37,298:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:54:44,827:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 12.0615 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:54:44,833:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 108. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:44,834:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.0140929261221\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 5\n",
" classifier:gradient_boosting:max_features, Value: 1.98361471899\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 18\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 491\n",
" classifier:gradient_boosting:subsample, Value: 0.478262930842\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00174982231698\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 11.7187438086\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:55,449:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 108. configuration. Duration: 10.520200; loss: 0.946721; status 1; additional run info: ;duration: 10.5201997756958;num_run:00108 \n",
"[INFO] [2016-08-16 07:54:55,455:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 109. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:55,456:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 3.15808734576\n",
" classifier:extra_trees:min_samples_leaf, Value: 13\n",
" classifier:extra_trees:min_samples_split, Value: 17\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00582881233453\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:coef0, Value: 0.707819425797\n",
" preprocessor:kernel_pca:degree, Value: 3\n",
" preprocessor:kernel_pca:gamma, Value: 0.471951347077\n",
" preprocessor:kernel_pca:kernel, Value: poly\n",
" preprocessor:kernel_pca:n_components, Value: 924\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:54:56,292:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 109. configuration. Duration: 0.755989; loss: 0.786885; status 1; additional run info: ;duration: 0.755988597869873;num_run:00109 \n",
"[INFO] [2016-08-16 07:54:56,300:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 110. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:54:56,302:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00194838002235\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 13\n",
" preprocessor:gem:precond, Value: 0.467735915172\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:54:57,039:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 110. configuration. Duration: 0.687126; loss: 0.725410; status 1; additional run info: ;duration: 0.6871263980865479;num_run:00110 \n",
"[INFO] [2016-08-16 07:54:57,218:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 88 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:54:57,371:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:54:57,431:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:54:59,300:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [34, 16, 26, 6, 16, 16, 3, 3, 3, 3, 36, 3, 3, 3, 3, 16, 16, 16, 3, 3, 3, 3, 34, 0, 25, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 34, 3, 3, 3, 3, 3, 36, 5, 21]\n",
"\tWeights: [ 0.02 0. 0. 0.66 0. 0.02 0.02 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.12 0. 0. 0. 0. 0.02 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0.06 0. 0.04\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:54:59,306:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:54:59,309:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.942131 seconds\n",
"[INFO] [2016-08-16 07:54:59,310:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:55:12,969:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 15.7494 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:55:12,975:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 111. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:12,976:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 6\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:13,604:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 111. configuration. Duration: 0.574477; loss: 0.680328; status 1; additional run info: ;duration: 0.5744767189025879;num_run:00111 \n",
"[INFO] [2016-08-16 07:55:13,611:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 112. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:13,613:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.00929438164542\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: squared_hinge\n",
" classifier:passive_aggressive:n_iter, Value: 99\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.120247338594\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 3.63940092656\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 1\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 15\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:55:13,981:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 112. configuration. Duration: 0.321251; loss: 0.754098; status 1; additional run info: ;duration: 0.32125091552734375;num_run:00112 \n",
"[INFO] [2016-08-16 07:55:13,987:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 113. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:13,989:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:14,593:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 113. configuration. Duration: 0.549000; loss: 0.672131; status 1; additional run info: ;duration: 0.5490000247955322;num_run:00113 \n",
"[INFO] [2016-08-16 07:55:14,600:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 114. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:14,602:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: k_nearest_neighbors\n",
" classifier:k_nearest_neighbors:n_neighbors, Value: 9\n",
" classifier:k_nearest_neighbors:p, Value: 1\n",
" classifier:k_nearest_neighbors:weights, Value: uniform\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000138480060248\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 1777\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:55:15,040:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 114. configuration. Duration: 0.401957; loss: 0.745902; status 1; additional run info: ;duration: 0.40195727348327637;num_run:00114 \n",
"[INFO] [2016-08-16 07:55:15,047:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 115. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:15,049:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 8\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:55:15,374:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:15,440:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:15,678:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 115. configuration. Duration: 0.574211; loss: 0.676230; status 1; additional run info: ;duration: 0.5742108821868896;num_run:00115 \n",
"[INFO] [2016-08-16 07:55:15,685:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 116. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:15,687:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.841568514067\n",
" classifier:adaboost:max_depth, Value: 2\n",
" classifier:adaboost:n_estimators, Value: 369\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 5.6949351956\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 1.76018529899e-05\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:55:16,923:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 116. configuration. Duration: 1.177393; loss: 0.745902; status 1; additional run info: ;duration: 1.1773934364318848;num_run:00116 \n",
"[INFO] [2016-08-16 07:55:16,930:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 117. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:16,932:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:17,489:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [32, 14, 24, 5, 14, 14, 3, 3, 3, 3, 34, 3, 3, 3, 3, 14, 14, 14, 3, 3, 3, 3, 32, 0, 23, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 32, 3, 3, 3, 3, 3, 34, 4, 19]\n",
"\tWeights: [ 0.02 0. 0. 0.66 0.02 0.02 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0.12 0. 0. 0. 0. 0.02 0. 0. 0. 0.02 0.02\n",
" 0. 0. 0. 0. 0. 0. 0. 0.06 0. 0.04 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:55:17,497:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:55:17,499:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.130225 seconds\n",
"[INFO] [2016-08-16 07:55:17,502:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:55:17,578:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 117. configuration. Duration: 0.591630; loss: 0.688525; status 1; additional run info: ;duration: 0.5916295051574707;num_run:00117 \n",
"[INFO] [2016-08-16 07:55:17,586:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 118. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:17,588:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 49\n",
" classifier:lda:shrinkage, Value: auto\n",
" classifier:lda:tol, Value: 0.00431693849262\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 1.49959402131\n",
" preprocessor:kitchen_sinks:n_components, Value: 106\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:55:17,716:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 118. configuration. Duration: 0.072633; loss: 0.860656; status 1; additional run info: ;duration: 0.07263302803039551;num_run:00118 \n",
"[INFO] [2016-08-16 07:55:17,723:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 119. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:17,725:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:18,393:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 119. configuration. Duration: 0.613678; loss: 0.672131; status 1; additional run info: ;duration: 0.6136782169342041;num_run:00119 \n",
"[INFO] [2016-08-16 07:55:18,402:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 120. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:18,404:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: proj_logit\n",
" classifier:proj_logit:max_epochs, Value: 16\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.0562833850085\n",
" preprocessor:select_rates:mode, Value: fwe\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/feature_selection/base.py:80: UserWarning: No features were selected: either the data is too noisy or the selection test too strict.\n",
" UserWarning)\n",
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 148, in _pre_transform\n",
" .transform(Xt)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/select_rates.py\", line 72, in transform\n",
" \"%s removed all features.\" % self.__class__.__name__)\n",
"ValueError: SelectRates removed all features.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:55:19,482:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 120. configuration. Duration: 1.071274; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:55:19,490:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 121. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:19,492:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:55:19,517:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:19,580:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:20,103:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 121. configuration. Duration: 0.556868; loss: 0.659836; status 1; additional run info: ;duration: 0.556868314743042;num_run:00121 \n",
"[INFO] [2016-08-16 07:55:20,111:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 122. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:20,114:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.598744078036\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 7\n",
" classifier:xgradient_boosting:min_child_weight, Value: 12\n",
" classifier:xgradient_boosting:n_estimators, Value: 187\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.332950950447\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0592577452007\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.345444684314\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:55:20,356:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 122. configuration. Duration: 0.205959; loss: 0.848361; status 1; additional run info: ;duration: 0.20595932006835938;num_run:00122 \n",
"[INFO] [2016-08-16 07:55:20,363:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 123. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:20,366:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 9\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:20,959:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 123. configuration. Duration: 0.540544; loss: 0.684426; status 1; additional run info: ;duration: 0.5405442714691162;num_run:00123 \n",
"[INFO] [2016-08-16 07:55:20,967:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 124. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:20,969:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: proj_logit\n",
" classifier:proj_logit:max_epochs, Value: 5\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 116\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:55:21,044:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 124. configuration. Duration: 0.032275; loss: 0.860656; status 1; additional run info: ;duration: 0.03227496147155762;num_run:00124 \n",
"[INFO] [2016-08-16 07:55:21,052:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 125. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:21,055:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 5\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:21,541:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.381148 14: 0.381148 15: 0.381148 16: 0.381148 17: 0.381148 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.372951 22: 0.372951 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.381148 28: 0.381148 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.377049 41: 0.377049 42: 0.372951 43: 0.372951 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.377049 49: 0.381148\n",
"\tMembers: [29, 12, 22, 5, 12, 12, 3, 3, 3, 3, 31, 3, 3, 3, 3, 12, 12, 12, 3, 3, 3, 3, 29, 0, 21, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 29, 3, 3, 3, 3, 3, 31, 4, 17]\n",
"\tWeights: [ 0.02 0. 0. 0.66 0.02 0.02 0. 0. 0. 0. 0. 0.\n",
" 0.12 0. 0. 0. 0. 0.02 0. 0. 0. 0.02 0.02 0. 0.\n",
" 0. 0. 0. 0. 0.06 0. 0.04 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 8) (1, 14) (1, 19) (1, 21) (1, 40) (1, 52) (1, 61) (1, 63) (1, 79) (1, 82)\n",
"[INFO] [2016-08-16 07:55:21,548:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:55:21,551:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.038180 seconds\n",
"[INFO] [2016-08-16 07:55:21,553:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:55:21,658:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 125. configuration. Duration: 0.550006; loss: 0.668033; status 1; additional run info: ;duration: 0.5500056743621826;num_run:00125 \n",
"[INFO] [2016-08-16 07:55:21,667:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 126. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:21,669:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0102758029498\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 4\n",
" classifier:xgradient_boosting:min_child_weight, Value: 20\n",
" classifier:xgradient_boosting:n_estimators, Value: 370\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.713879366557\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0811542628976\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 250\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:55:22,244:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 126. configuration. Duration: 0.537631; loss: 0.684426; status 1; additional run info: ;duration: 0.5376310348510742;num_run:00126 \n",
"[INFO] [2016-08-16 07:55:22,250:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 127. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:22,252:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00345710317446\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:22,862:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 127. configuration. Duration: 0.557615; loss: 0.688525; status 1; additional run info: ;duration: 0.557614803314209;num_run:00127 \n",
"[INFO] [2016-08-16 07:55:22,869:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 128. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:22,871:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 49\n",
" classifier:lda:shrinkage, Value: auto\n",
" classifier:lda:tol, Value: 3.52865677017e-05\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0864867962725\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 310\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:55:22,929:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 128. configuration. Duration: 0.025280; loss: 0.860656; status 1; additional run info: ;duration: 0.025279521942138672;num_run:00128 \n",
"[INFO] [2016-08-16 07:55:22,936:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 129. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:22,938:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 2.91968662088\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 7\n",
" classifier:random_forest:min_samples_split, Value: 10\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0162769660733\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:23,190:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 129. configuration. Duration: 0.210661; loss: 0.696721; status 1; additional run info: ;duration: 0.21066069602966309;num_run:00129 \n",
"[INFO] [2016-08-16 07:55:23,197:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 130. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:23,199:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 9\n",
" classifier:lda:shrinkage, Value: auto\n",
" classifier:lda:tol, Value: 2.99467158145e-05\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 6\n",
" preprocessor:gem:precond, Value: 0.377989414122\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:23,306:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 130. configuration. Duration: 0.062868; loss: 0.713115; status 1; additional run info: ;duration: 0.06286764144897461;num_run:00130 \n",
"[INFO] [2016-08-16 07:55:23,313:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 131. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:23,315:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.0787897825807\n",
" classifier:adaboost:max_depth, Value: 4\n",
" classifier:adaboost:n_estimators, Value: 290\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run13\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:55:23,569:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:23,632:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:24,542:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 131. configuration. Duration: 1.172038; loss: 0.741803; status 1; additional run info: ;duration: 1.1720380783081055;num_run:00131 \n",
"[INFO] [2016-08-16 07:55:24,550:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 132. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:24,551:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.284771318748\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 7\n",
" classifier:xgradient_boosting:n_estimators, Value: 50\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.891702003902\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 3.6809589602\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 16\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 6\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:55:24,863:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 132. configuration. Duration: 0.268478; loss: 0.729508; status 1; additional run info: ;duration: 0.2684783935546875;num_run:00132 \n",
"[INFO] [2016-08-16 07:55:24,870:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 133. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:24,871:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: False\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.22532262467\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 12\n",
" classifier:random_forest:min_samples_split, Value: 15\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:55:25,216:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 133. configuration. Duration: 0.304640; loss: 0.696721; status 1; additional run info: ;duration: 0.3046400547027588;num_run:00133 \n",
"[INFO] [2016-08-16 07:55:25,223:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 134. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:25,225:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 4.19487985279\n",
" classifier:libsvm_svc:gamma, Value: 0.53354242842\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 0.0960682255726\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00242775775823\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: pca\n",
" preprocessor:pca:keep_variance, Value: 0.927350573044\n",
" preprocessor:pca:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:55:25,317:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 134. configuration. Duration: 0.059646; loss: 0.782787; status 1; additional run info: ;duration: 0.0596463680267334;num_run:00134 \n",
"[INFO] [2016-08-16 07:55:25,324:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 135. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:25,325:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 4.33232458859\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 14\n",
" classifier:random_forest:min_samples_split, Value: 18\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 18\n",
" preprocessor:gem:precond, Value: 0.256012088744\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:55:25,610:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.377049 14: 0.377049 15: 0.377049 16: 0.377049 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.377049 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.377049 41: 0.377049 42: 0.377049 43: 0.377049 44: 0.377049 45: 0.377049 46: 0.377049 47: 0.377049 48: 0.377049 49: 0.377049\n",
"\tMembers: [26, 11, 19, 4, 11, 11, 2, 2, 2, 2, 2, 2, 2, 11, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 25, 2, 2, 2, 18, 2, 2, 23, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n",
"\tWeights: [ 0. 0. 0.78 0. 0.02 0. 0. 0. 0. 0. 0. 0.1 0.\n",
" 0. 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0.02 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 21) (1, 40) (1, 61) (1, 63) (1, 69) (1, 73) (1, 79)\n",
"[INFO] [2016-08-16 07:55:25,616:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.377049\n",
"[INFO] [2016-08-16 07:55:25,618:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.052617 seconds\n",
"[INFO] [2016-08-16 07:55:25,620:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (126)!.\n",
"[INFO] [2016-08-16 07:55:25,623:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (126)!\n",
"[ERROR] [2016-08-16 07:55:25,633:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:25,698:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:25,848:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 135. configuration. Duration: 0.479432; loss: 0.700820; status 1; additional run info: ;duration: 0.4794318675994873;num_run:00135 \n",
"[INFO] [2016-08-16 07:55:25,855:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 136. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:25,857:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 0.588128709313\n",
" classifier:extra_trees:min_samples_leaf, Value: 15\n",
" classifier:extra_trees:min_samples_split, Value: 15\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.124377321709\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 10\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 5\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 6\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 86\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:26,331:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 136. configuration. Duration: 0.425857; loss: 0.795082; status 1; additional run info: ;duration: 0.4258568286895752;num_run:00136 \n",
"[INFO] [2016-08-16 07:55:26,338:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 137. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:26,340:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 0.802700607691\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 15\n",
" classifier:random_forest:min_samples_split, Value: 7\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00077613349477\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 10\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 10\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 3\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 95\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:55:26,826:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 137. configuration. Duration: 0.438260; loss: 0.750000; status 1; additional run info: ;duration: 0.4382603168487549;num_run:00137 \n",
"[INFO] [2016-08-16 07:55:26,834:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 138. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:26,835:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.0157110782873\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.0526962841652\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:l1_ratio, Value: 1.68232280645e-08\n",
" classifier:sgd:learning_rate, Value: constant\n",
" classifier:sgd:loss, Value: perceptron\n",
" classifier:sgd:n_iter, Value: 121\n",
" classifier:sgd:penalty, Value: elasticnet\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0114913918957\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 164\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:27,635:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 138. configuration. Duration: 0.723895; loss: 0.831967; status 1; additional run info: ;duration: 0.7238950729370117;num_run:00138 \n",
"[INFO] [2016-08-16 07:55:27,643:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 139. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:27,645:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 3.06538605845\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 19\n",
" classifier:random_forest:min_samples_split, Value: 19\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0816291391888\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: manhattan\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 50\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:55:27,926:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.377049 14: 0.377049 15: 0.377049 16: 0.377049 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.381148 32: 0.385246 33: 0.385246 34: 0.385246 35: 0.385246 36: 0.389344 37: 0.389344 38: 0.389344 39: 0.389344 40: 0.385246 41: 0.385246 42: 0.385246 43: 0.385246 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [25, 11, 18, 4, 11, 11, 2, 2, 2, 2, 2, 2, 2, 11, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 24, 2, 2, 2, 2, 2, 7, 7, 11, 2, 2, 2, 2, 2, 2, 2, 11, 2, 2, 2, 11, 2, 2, 2, 2, 2]\n",
"\tWeights: [ 0. 0. 0.72 0. 0.02 0. 0. 0.04 0. 0. 0. 0.16\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 21) (1, 31) (1, 40) (1, 63) (1, 73) (1, 79)\n",
"[INFO] [2016-08-16 07:55:27,933:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:55:27,935:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.307051 seconds\n",
"[INFO] [2016-08-16 07:55:27,938:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (130)!.\n",
"[INFO] [2016-08-16 07:55:27,940:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (130)!\n",
"[ERROR] [2016-08-16 07:55:27,949:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:28,018:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:28,142:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 139. configuration. Duration: 0.454250; loss: 0.704918; status 1; additional run info: ;duration: 0.45424962043762207;num_run:00139 \n",
"[INFO] [2016-08-16 07:55:28,150:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 140. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:28,152:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.596714345182\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 2\n",
" classifier:xgradient_boosting:min_child_weight, Value: 2\n",
" classifier:xgradient_boosting:n_estimators, Value: 496\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.78319831193\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 7\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 2\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 10\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 68\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:30,304:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.377049 14: 0.377049 15: 0.377049 16: 0.377049 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.381148 32: 0.385246 33: 0.385246 34: 0.385246 35: 0.385246 36: 0.389344 37: 0.389344 38: 0.389344 39: 0.389344 40: 0.385246 41: 0.385246 42: 0.385246 43: 0.385246 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [25, 11, 18, 4, 11, 11, 2, 2, 2, 2, 2, 2, 2, 11, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 24, 2, 2, 2, 2, 2, 7, 7, 11, 2, 2, 2, 2, 2, 2, 2, 11, 2, 2, 2, 11, 2, 2, 2, 2, 2]\n",
"\tWeights: [ 0. 0. 0.72 0. 0.02 0. 0. 0.04 0. 0. 0. 0.16\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 21) (1, 31) (1, 40) (1, 63) (1, 73) (1, 79)\n",
"[INFO] [2016-08-16 07:55:30,311:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:55:30,314:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.368931 seconds\n",
"[INFO] [2016-08-16 07:55:30,316:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 60, in fit_predict_and_loss\n",
" return self.predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 115, in predict_and_loss\n",
" Y_optimization_pred, Y_valid_pred, Y_test_pred = self._predict()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 98, in _predict\n",
" self.Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/abstract_evaluator.py\", line 266, in _predict_proba\n",
" Y_pred = model.predict_proba(X, batch_size=1000)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 120, in predict_proba\n",
" target = self.predict_proba(X[0:2].copy())\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 112, in predict_proba\n",
" return self.pipeline_.steps[-1][-1].predict_proba(Xt)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/classification/xgradient_boosting.py\", line 141, in predict_proba\n",
" return self.estimator.predict_proba(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/xgboost/sklearn.py\", line 477, in predict_proba\n",
" ntree_limit=ntree_limit)\n",
" File \"/opt/conda/lib/python3.5/site-packages/xgboost/core.py\", line 939, in predict\n",
" self._validate_features(data)\n",
" File \"/opt/conda/lib/python3.5/site-packages/xgboost/core.py\", line 1179, in _validate_features\n",
" data.feature_names))\n",
"ValueError: feature_names mismatch: ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41', 'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54', 'f55', 'f56', 'f57', 'f58', 'f59', 'f60', 'f61', 'f62', 'f63', 'f64', 'f65', 'f66', 'f67', 'f68', 'f69', 'f70', 'f71', 'f72', 'f73', 'f74', 'f75', 'f76', 'f77', 'f78', 'f79', 'f80', 'f81', 'f82', 'f83', 'f84', 'f85', 'f86', 'f87', 'f88', 'f89', 'f90', 'f91', 'f92', 'f93', 'f94', 'f95', 'f96', 'f97', 'f98', 'f99', 'f100', 'f101', 'f102', 'f103', 'f104', 'f105', 'f106', 'f107', 'f108', 'f109', 'f110', 'f111', 'f112', 'f113', 'f114', 'f115', 'f116', 'f117', 'f118', 'f119', 'f120', 'f121', 'f122', 'f123', 'f124', 'f125', 'f126', 'f127', 'f128', 'f129', 'f130', 'f131', 'f132', 'f133', 'f134', 'f135', 'f136', 'f137', 'f138', 'f139', 'f140', 'f141', 'f142', 'f143', 'f144', 'f145', 'f146', 'f147', 'f148', 'f149', 'f150', 'f151', 'f152', 'f153', 'f154', 'f155', 'f156', 'f157', 'f158', 'f159', 'f160', 'f161', 'f162', 'f163', 'f164', 'f165', 'f166', 'f167', 'f168', 'f169', 'f170', 'f171', 'f172', 'f173', 'f174', 'f175', 'f176', 'f177', 'f178', 'f179', 'f180', 'f181', 'f182', 'f183', 'f184', 'f185', 'f186', 'f187', 'f188', 'f189', 'f190', 'f191', 'f192', 'f193', 'f194', 'f195', 'f196', 'f197', 'f198', 'f199', 'f200', 'f201', 'f202', 'f203', 'f204', 'f205', 'f206', 'f207', 'f208', 'f209', 'f210', 'f211', 'f212', 'f213', 'f214', 'f215', 'f216', 'f217', 'f218', 'f219', 'f220', 'f221', 'f222', 'f223', 'f224', 'f225', 'f226', 'f227', 'f228', 'f229', 'f230', 'f231', 'f232', 'f233', 'f234', 'f235', 'f236', 'f237', 'f238', 'f239', 'f240', 'f241', 'f242', 'f243', 'f244', 'f245', 'f246', 'f247', 'f248', 'f249', 'f250', 'f251', 'f252', 'f253', 'f254', 'f255', 'f256', 'f257', 'f258', 
'f259', 'f260', 'f261', 'f262', 'f263', 'f264', 'f265', 'f266', 'f267', 'f268', 'f269', 'f270', 'f271', 'f272', 'f273', 'f274', 'f275', 'f276', 'f277', 'f278', 'f279', 'f280', 'f281', 'f282', 'f283', 'f284', 'f285', 'f286', 'f287', 'f288', 'f289', 'f290', 'f291', 'f292', 'f293', 'f294', 'f295', 'f296', 'f297', 'f298', 'f299', 'f300', 'f301', 'f302', 'f303', 'f304', 'f305', 'f306', 'f307', 'f308', 'f309', 'f310', 'f311', 'f312', 'f313', 'f314', 'f315', 'f316', 'f317', 'f318', 'f319', 'f320', 'f321', 'f322', 'f323', 'f324', 'f325', 'f326', 'f327', 'f328', 'f329', 'f330', 'f331', 'f332', 'f333', 'f334', 'f335', 'f336', 'f337', 'f338', 'f339', 'f340', 'f341', 'f342', 'f343', 'f344', 'f345', 'f346', 'f347', 'f348', 'f349', 'f350', 'f351', 'f352', 'f353', 'f354', 'f355', 'f356', 'f357', 'f358', 'f359', 'f360', 'f361', 'f362', 'f363', 'f364', 'f365', 'f366', 'f367', 'f368', 'f369', 'f370', 'f371', 'f372', 'f373', 'f374', 'f375', 'f376', 'f377', 'f378', 'f379', 'f380', 'f381', 'f382', 'f383', 'f384', 'f385', 'f386', 'f387', 'f388', 'f389', 'f390', 'f391', 'f392', 'f393', 'f394', 'f395', 'f396', 'f397', 'f398', 'f399', 'f400', 'f401', 'f402', 'f403', 'f404', 'f405', 'f406', 'f407', 'f408', 'f409', 'f410', 'f411', 'f412', 'f413', 'f414', 'f415', 'f416', 'f417', 'f418', 'f419', 'f420', 'f421', 'f422', 'f423', 'f424', 'f425', 'f426', 'f427', 'f428', 'f429', 'f430', 'f431', 'f432', 'f433', 'f434', 'f435', 'f436', 'f437', 'f438', 'f439', 'f440', 'f441', 'f442', 'f443', 'f444', 'f445', 'f446', 'f447', 'f448', 'f449', 'f450', 'f451', 'f452', 'f453', 'f454', 'f455', 'f456', 'f457', 'f458', 'f459', 'f460', 'f461', 'f462', 'f463', 'f464', 'f465', 'f466', 'f467', 'f468', 'f469', 'f470', 'f471', 'f472', 'f473', 'f474', 'f475', 'f476', 'f477', 'f478', 'f479', 'f480', 'f481', 'f482', 'f483', 'f484', 'f485', 'f486', 'f487', 'f488', 'f489', 'f490', 'f491', 'f492', 'f493', 'f494', 'f495', 'f496', 'f497', 'f498', 'f499', 'f500', 'f501', 'f502', 'f503', 'f504', 'f505', 'f506', 'f507', 'f508', 
'f509', 'f510', 'f511', 'f512', 'f513', 'f514', 'f515', 'f516', 'f517', 'f518', 'f519', 'f520', 'f521', 'f522', 'f523', 'f524', 'f525', 'f526', 'f527', 'f528', 'f529', 'f530', 'f531', 'f532', 'f533', 'f534', 'f535', 'f536', 'f537', 'f538', 'f539', 'f540', 'f541', 'f542', 'f543', 'f544', 'f545', 'f546', 'f547', 'f548', 'f549', 'f550', 'f551', 'f552', 'f553', 'f554', 'f555', 'f556', 'f557', 'f558', 'f559', 'f560', 'f561', 'f562', 'f563', 'f564', 'f565', 'f566', 'f567', 'f568', 'f569', 'f570', 'f571', 'f572', 'f573', 'f574', 'f575', 'f576', 'f577', 'f578', 'f579', 'f580', 'f581', 'f582', 'f583', 'f584', 'f585', 'f586', 'f587', 'f588', 'f589', 'f590', 'f591', 'f592', 'f593', 'f594', 'f595', 'f596', 'f597', 'f598', 'f599', 'f600', 'f601', 'f602', 'f603', 'f604', 'f605', 'f606', 'f607', 'f608', 'f609', 'f610', 'f611', 'f612', 'f613', 'f614', 'f615', 'f616', 'f617', 'f618', 'f619', 'f620', 'f621', 'f622', 'f623', 'f624', 'f625', 'f626', 'f627', 'f628', 'f629', 'f630', 'f631', 'f632', 'f633', 'f634', 'f635', 'f636', 'f637', 'f638', 'f639', 'f640', 'f641', 'f642', 'f643', 'f644', 'f645', 'f646', 'f647', 'f648', 'f649', 'f650', 'f651', 'f652', 'f653', 'f654', 'f655', 'f656', 'f657', 'f658', 'f659', 'f660', 'f661', 'f662', 'f663', 'f664', 'f665', 'f666', 'f667', 'f668', 'f669', 'f670', 'f671', 'f672', 'f673', 'f674', 'f675', 'f676', 'f677', 'f678', 'f679', 'f680', 'f681', 'f682', 'f683', 'f684', 'f685', 'f686', 'f687', 'f688', 'f689', 'f690', 'f691', 'f692', 'f693', 'f694', 'f695', 'f696', 'f697', 'f698', 'f699', 'f700', 'f701', 'f702', 'f703', 'f704', 'f705', 'f706', 'f707', 'f708', 'f709', 'f710', 'f711', 'f712', 'f713', 'f714', 'f715', 'f716', 'f717', 'f718', 'f719', 'f720', 'f721', 'f722', 'f723', 'f724', 'f725', 'f726', 'f727', 'f728', 'f729', 'f730', 'f731', 'f732', 'f733', 'f734', 'f735', 'f736', 'f737', 'f738', 'f739', 'f740', 'f741', 'f742', 'f743', 'f744', 'f745', 'f746', 'f747', 'f748', 'f749', 'f750', 'f751', 'f752', 'f753', 'f754', 'f755', 'f756', 'f757', 'f758', 
'f759', 'f760', 'f761', 'f762', 'f763', 'f764', 'f765', 'f766', 'f767', 'f768', 'f769', 'f770', 'f771', 'f772', 'f773', 'f774', 'f775', 'f776', 'f777', 'f778', 'f779', 'f780', 'f781', 'f782', 'f783', 'f784', 'f785', 'f786', 'f787', 'f788', 'f789', 'f790', 'f791', 'f792', 'f793', 'f794', 'f795', 'f796', 'f797', 'f798', 'f799', 'f800', 'f801', 'f802', 'f803', 'f804', 'f805', 'f806', 'f807', 'f808', 'f809', 'f810', 'f811', 'f812', 'f813', 'f814', 'f815', 'f816', 'f817', 'f818', 'f819', 'f820', 'f821', 'f822', 'f823', 'f824', 'f825', 'f826', 'f827', 'f828', 'f829', 'f830', 'f831', 'f832', 'f833', 'f834', 'f835', 'f836', 'f837', 'f838', 'f839', 'f840', 'f841', 'f842', 'f843', 'f844', 'f845', 'f846', 'f847', 'f848', 'f849', 'f850', 'f851', 'f852', 'f853', 'f854', 'f855', 'f856', 'f857', 'f858', 'f859', 'f860', 'f861', 'f862', 'f863', 'f864', 'f865', 'f866', 'f867', 'f868', 'f869', 'f870', 'f871', 'f872', 'f873', 'f874', 'f875', 'f876', 'f877', 'f878', 'f879', 'f880', 'f881', 'f882', 'f883', 'f884', 'f885', 'f886', 'f887', 'f888', 'f889', 'f890', 'f891', 'f892', 'f893', 'f894', 'f895', 'f896', 'f897', 'f898', 'f899', 'f900', 'f901', 'f902', 'f903', 'f904', 'f905', 'f906', 'f907', 'f908', 'f909', 'f910', 'f911', 'f912', 'f913', 'f914', 'f915', 'f916', 'f917', 'f918', 'f919', 'f920', 'f921', 'f922', 'f923', 'f924', 'f925', 'f926', 'f927', 'f928', 'f929', 'f930', 'f931', 'f932', 'f933', 'f934', 'f935', 'f936', 'f937', 'f938', 'f939', 'f940', 'f941', 'f942', 'f943', 'f944', 'f945', 'f946', 'f947', 'f948', 'f949', 'f950', 'f951', 'f952', 'f953', 'f954', 'f955', 'f956', 'f957', 'f958', 'f959', 'f960', 'f961', 'f962', 'f963', 'f964', 'f965', 'f966', 'f967', 'f968', 'f969', 'f970', 'f971', 'f972', 'f973', 'f974', 'f975', 'f976', 'f977', 'f978', 'f979', 'f980', 'f981', 'f982', 'f983', 'f984', 'f985', 'f986', 'f987', 'f988', 'f989', 'f990', 'f991', 'f992', 'f993', 'f994', 'f995', 'f996', 'f997', 'f998', 'f999', 'f1000', 'f1001', 'f1002', 'f1003', 'f1004', 'f1005', 'f1006', 'f1007', 
'f1008', 'f1009', 'f1010', 'f1011', 'f1012', 'f1013', 'f1014', 'f1015', 'f1016', 'f1017', 'f1018', 'f1019', 'f1020', 'f1021', 'f1022', 'f1023', 'f1024', 'f1025', 'f1026', 'f1027', 'f1028', 'f1029', 'f1030', 'f1031', 'f1032', 'f1033', 'f1034', 'f1035', 'f1036', 'f1037', 'f1038', 'f1039', 'f1040', 'f1041', 'f1042', 'f1043', 'f1044', 'f1045', 'f1046', 'f1047', 'f1048', 'f1049', 'f1050', 'f1051', 'f1052', 'f1053', 'f1054', 'f1055', 'f1056', 'f1057', 'f1058', 'f1059', 'f1060', 'f1061', 'f1062', 'f1063', 'f1064', 'f1065', 'f1066', 'f1067', 'f1068', 'f1069', 'f1070', 'f1071', 'f1072', 'f1073', 'f1074', 'f1075', 'f1076', 'f1077', 'f1078', 'f1079', 'f1080', 'f1081', 'f1082', 'f1083', 'f1084', 'f1085', 'f1086', 'f1087', 'f1088', 'f1089', 'f1090', 'f1091', 'f1092', 'f1093', 'f1094', 'f1095', 'f1096', 'f1097', 'f1098', 'f1099', 'f1100', 'f1101', 'f1102', 'f1103', 'f1104', 'f1105', 'f1106', 'f1107', 'f1108', 'f1109', 'f1110', 'f1111', 'f1112', 'f1113', 'f1114', 'f1115', 'f1116', 'f1117', 'f1118', 'f1119', 'f1120', 'f1121', 'f1122', 'f1123', 'f1124', 'f1125', 'f1126', 'f1127', 'f1128', 'f1129', 'f1130', 'f1131', 'f1132', 'f1133', 'f1134', 'f1135', 'f1136', 'f1137', 'f1138', 'f1139', 'f1140', 'f1141', 'f1142', 'f1143', 'f1144', 'f1145', 'f1146', 'f1147', 'f1148', 'f1149', 'f1150', 'f1151', 'f1152', 'f1153', 'f1154', 'f1155', 'f1156', 'f1157', 'f1158', 'f1159', 'f1160', 'f1161', 'f1162', 'f1163', 'f1164', 'f1165', 'f1166', 'f1167', 'f1168', 'f1169', 'f1170', 'f1171', 'f1172', 'f1173', 'f1174', 'f1175', 'f1176', 'f1177', 'f1178', 'f1179', 'f1180', 'f1181', 'f1182', 'f1183', 'f1184', 'f1185', 'f1186', 'f1187', 'f1188', 'f1189', 'f1190', 'f1191', 'f1192', 'f1193', 'f1194', 'f1195', 'f1196', 'f1197', 'f1198', 'f1199', 'f1200', 'f1201', 'f1202', 'f1203', 'f1204', 'f1205', 'f1206', 'f1207', 'f1208', 'f1209', 'f1210', 'f1211', 'f1212', 'f1213', 'f1214', 'f1215', 'f1216', 'f1217', 'f1218', 'f1219', 'f1220', 'f1221', 'f1222', 'f1223', 'f1224', 'f1225', 'f1226', 'f1227', 'f1228', 'f1229', 
'f1230', 'f1231', 'f1232', 'f1233', 'f1234', 'f1235', 'f1236', 'f1237', 'f1238', 'f1239', 'f1240', 'f1241', 'f1242', 'f1243', 'f1244', 'f1245', 'f1246', 'f1247', 'f1248', 'f1249', 'f1250', 'f1251', 'f1252', 'f1253', 'f1254', 'f1255', 'f1256', 'f1257', 'f1258', 'f1259', 'f1260', 'f1261', 'f1262', 'f1263', 'f1264', 'f1265', 'f1266', 'f1267', 'f1268', 'f1269', 'f1270', 'f1271', 'f1272', 'f1273', 'f1274', 'f1275', 'f1276', 'f1277', 'f1278', 'f1279', 'f1280', 'f1281', 'f1282', 'f1283', 'f1284', 'f1285', 'f1286', 'f1287', 'f1288', 'f1289', 'f1290', 'f1291', 'f1292', 'f1293', 'f1294', 'f1295', 'f1296', 'f1297', 'f1298', 'f1299', 'f1300', 'f1301', 'f1302', 'f1303', 'f1304', 'f1305', 'f1306', 'f1307', 'f1308', 'f1309', 'f1310', 'f1311', 'f1312', 'f1313', 'f1314', 'f1315', 'f1316', 'f1317', 'f1318', 'f1319', 'f1320', 'f1321', 'f1322', 'f1323', 'f1324', 'f1325', 'f1326', 'f1327', 'f1328', 'f1329', 'f1330', 'f1331', 'f1332', 'f1333', 'f1334', 'f1335', 'f1336', 'f1337', 'f1338', 'f1339', 'f1340', 'f1341', 'f1342', 'f1343', 'f1344', 'f1345', 'f1346', 'f1347', 'f1348', 'f1349', 'f1350', 'f1351', 'f1352', 'f1353', 'f1354', 'f1355', 'f1356', 'f1357', 'f1358', 'f1359', 'f1360', 'f1361', 'f1362', 'f1363', 'f1364', 'f1365', 'f1366', 'f1367', 'f1368', 'f1369', 'f1370', 'f1371', 'f1372', 'f1373', 'f1374', 'f1375', 'f1376', 'f1377', 'f1378', 'f1379', 'f1380', 'f1381', 'f1382', 'f1383', 'f1384', 'f1385', 'f1386', 'f1387', 'f1388', 'f1389', 'f1390', 'f1391', 'f1392', 'f1393', 'f1394', 'f1395', 'f1396', 'f1397', 'f1398', 'f1399', 'f1400', 'f1401', 'f1402', 'f1403', 'f1404', 'f1405', 'f1406', 'f1407', 'f1408', 'f1409', 'f1410', 'f1411', 'f1412', 'f1413', 'f1414', 'f1415', 'f1416', 'f1417', 'f1418', 'f1419', 'f1420', 'f1421', 'f1422', 'f1423', 'f1424', 'f1425', 'f1426', 'f1427', 'f1428', 'f1429', 'f1430', 'f1431', 'f1432', 'f1433', 'f1434', 'f1435', 'f1436', 'f1437', 'f1438', 'f1439', 'f1440', 'f1441', 'f1442', 'f1443', 'f1444', 'f1445', 'f1446', 'f1447', 'f1448', 'f1449', 'f1450', 'f1451', 
'f1452', 'f1453', 'f1454', 'f1455', 'f1456', 'f1457', 'f1458', 'f1459', 'f1460', 'f1461', 'f1462', 'f1463', 'f1464', 'f1465', 'f1466', 'f1467', 'f1468', 'f1469', 'f1470', 'f1471', 'f1472', 'f1473', 'f1474', 'f1475', 'f1476', 'f1477', 'f1478', 'f1479', 'f1480', 'f1481', 'f1482', 'f1483', 'f1484', 'f1485', 'f1486', 'f1487', 'f1488', 'f1489', 'f1490', 'f1491', 'f1492', 'f1493', 'f1494', 'f1495', 'f1496', 'f1497', 'f1498', 'f1499', 'f1500', 'f1501', 'f1502', 'f1503', 'f1504', 'f1505', 'f1506', 'f1507', 'f1508', 'f1509', 'f1510', 'f1511', 'f1512', 'f1513', 'f1514', 'f1515', 'f1516', 'f1517', 'f1518', 'f1519', 'f1520', 'f1521', 'f1522', 'f1523', 'f1524', 'f1525', 'f1526', 'f1527', 'f1528', 'f1529', 'f1530', 'f1531', 'f1532', 'f1533', 'f1534', 'f1535', 'f1536', 'f1537', 'f1538', 'f1539', 'f1540', 'f1541', 'f1542', 'f1543', 'f1544', 'f1545', 'f1546', 'f1547', 'f1548', 'f1549', 'f1550', 'f1551', 'f1552', 'f1553', 'f1554', 'f1555', 'f1556', 'f1557', 'f1558', 'f1559', 'f1560', 'f1561', 'f1562', 'f1563', 'f1564', 'f1565', 'f1566', 'f1567', 'f1568', 'f1569', 'f1570', 'f1571', 'f1572', 'f1573', 'f1574', 'f1575', 'f1576', 'f1577', 'f1578', 'f1579', 'f1580', 'f1581', 'f1582', 'f1583', 'f1584', 'f1585', 'f1586', 'f1587', 'f1588', 'f1589', 'f1590', 'f1591', 'f1592', 'f1593', 'f1594', 'f1595', 'f1596', 'f1597', 'f1598', 'f1599', 'f1600', 'f1601', 'f1602', 'f1603', 'f1604', 'f1605', 'f1606', 'f1607', 'f1608', 'f1609', 'f1610', 'f1611', 'f1612', 'f1613', 'f1614', 'f1615', 'f1616', 'f1617', 'f1618', 'f1619', 'f1620', 'f1621', 'f1622', 'f1623', 'f1624', 'f1625', 'f1626', 'f1627', 'f1628', 'f1629', 'f1630', 'f1631', 'f1632', 'f1633', 'f1634', 'f1635', 'f1636', 'f1637', 'f1638', 'f1639', 'f1640', 'f1641', 'f1642', 'f1643', 'f1644', 'f1645', 'f1646', 'f1647', 'f1648', 'f1649', 'f1650', 'f1651', 'f1652', 'f1653', 'f1654', 'f1655', 'f1656', 'f1657', 'f1658', 'f1659', 'f1660', 'f1661', 'f1662', 'f1663', 'f1664', 'f1665', 'f1666', 'f1667', 'f1668', 'f1669', 'f1670', 'f1671', 'f1672', 'f1673', 
'f1674', 'f1675', 'f1676', 'f1677', 'f1678', 'f1679', 'f1680', 'f1681', 'f1682', 'f1683', 'f1684', 'f1685', 'f1686', 'f1687', 'f1688', 'f1689', 'f1690', 'f1691', 'f1692', 'f1693', 'f1694', 'f1695', 'f1696', 'f1697', 'f1698', 'f1699', 'f1700', 'f1701', 'f1702', 'f1703', 'f1704', 'f1705', 'f1706', 'f1707', 'f1708', 'f1709', 'f1710', 'f1711', 'f1712', 'f1713', 'f1714', 'f1715', 'f1716', 'f1717', 'f1718', 'f1719', 'f1720', 'f1721', 'f1722', 'f1723', 'f1724', 'f1725', 'f1726', 'f1727', 'f1728', 'f1729', 'f1730', 'f1731', 'f1732', 'f1733', 'f1734', 'f1735', 'f1736', 'f1737', 'f1738', 'f1739', 'f1740', 'f1741', 'f1742', 'f1743', 'f1744', 'f1745', 'f1746', 'f1747', 'f1748', 'f1749', 'f1750', 'f1751', 'f1752', 'f1753', 'f1754', 'f1755', 'f1756', 'f1757', 'f1758', 'f1759', 'f1760', 'f1761', 'f1762', 'f1763', 'f1764', 'f1765', 'f1766', 'f1767', 'f1768', 'f1769', 'f1770', 'f1771', 'f1772', 'f1773', 'f1774', 'f1775', 'f1776', 'f1777', 'f1778', 'f1779', 'f1780', 'f1781', 'f1782', 'f1783', 'f1784', 'f1785', 'f1786', 'f1787', 'f1788', 'f1789', 'f1790', 'f1791', 'f1792', 'f1793', 'f1794', 'f1795', 'f1796', 'f1797', 'f1798', 'f1799', 'f1800', 'f1801', 'f1802', 'f1803', 'f1804', 'f1805', 'f1806', 'f1807', 'f1808', 'f1809', 'f1810', 'f1811', 'f1812', 'f1813', 'f1814', 'f1815', 'f1816', 'f1817', 'f1818', 'f1819', 'f1820', 'f1821', 'f1822', 'f1823', 'f1824', 'f1825', 'f1826', 'f1827', 'f1828', 'f1829', 'f1830', 'f1831', 'f1832', 'f1833', 'f1834', 'f1835', 'f1836', 'f1837', 'f1838', 'f1839', 'f1840', 'f1841', 'f1842', 'f1843', 'f1844', 'f1845', 'f1846', 'f1847', 'f1848', 'f1849', 'f1850', 'f1851', 'f1852', 'f1853', 'f1854', 'f1855', 'f1856', 'f1857', 'f1858', 'f1859', 'f1860', 'f1861', 'f1862', 'f1863', 'f1864', 'f1865', 'f1866', 'f1867', 'f1868', 'f1869', 'f1870', 'f1871', 'f1872', 'f1873', 'f1874', 'f1875', 'f1876', 'f1877', 'f1878', 'f1879', 'f1880', 'f1881', 'f1882', 'f1883', 'f1884', 'f1885', 'f1886', 'f1887', 'f1888', 'f1889', 'f1890', 'f1891', 'f1892', 'f1893', 'f1894', 'f1895', 
'f1896', 'f1897', 'f1898', 'f1899', 'f1900', 'f1901', 'f1902', 'f1903', 'f1904', 'f1905', 'f1906', 'f1907', 'f1908', 'f1909', 'f1910', 'f1911', 'f1912', 'f1913', 'f1914', 'f1915', 'f1916', 'f1917', 'f1918', 'f1919', 'f1920', 'f1921', 'f1922', 'f1923', 'f1924', 'f1925', 'f1926', 'f1927', 'f1928', 'f1929', 'f1930', 'f1931', 'f1932', 'f1933', 'f1934', 'f1935', 'f1936', 'f1937', 'f1938', 'f1939', 'f1940', 'f1941', 'f1942', 'f1943', 'f1944', 'f1945', 'f1946', 'f1947', 'f1948', 'f1949', 'f1950', 'f1951', 'f1952', 'f1953', 'f1954', 'f1955', 'f1956', 'f1957', 'f1958', 'f1959', 'f1960', 'f1961', 'f1962', 'f1963', 'f1964', 'f1965', 'f1966', 'f1967', 'f1968', 'f1969', 'f1970', 'f1971', 'f1972', 'f1973', 'f1974', 'f1975', 'f1976', 'f1977', 'f1978', 'f1979', 'f1980', 'f1981', 'f1982', 'f1983', 'f1984', 'f1985', 'f1986', 'f1987', 'f1988', 'f1989', 'f1990', 'f1991', 'f1992', 'f1993', 'f1994', 'f1995', 'f1996', 'f1997', 'f1998', 'f1999', 'f2000', 'f2001', 'f2002', 'f2003', 'f2004', 'f2005', 'f2006', 'f2007', 'f2008', 'f2009', 'f2010', 'f2011', 'f2012', 'f2013', 'f2014', 'f2015', 'f2016', 'f2017', 'f2018', 'f2019', 'f2020', 'f2021', 'f2022', 'f2023', 'f2024', 'f2025', 'f2026', 'f2027', 'f2028', 'f2029', 'f2030', 'f2031', 'f2032', 'f2033', 'f2034', 'f2035', 'f2036', 'f2037', 'f2038', 'f2039', 'f2040', 'f2041', 'f2042', 'f2043', 'f2044', 'f2045', 'f2046', 'f2047', 'f2048', 'f2049', 'f2050', 'f2051', 'f2052', 'f2053', 'f2054', 'f2055', 'f2056', 'f2057', 'f2058', 'f2059', 'f2060', 'f2061', 'f2062', 'f2063', 'f2064', 'f2065', 'f2066', 'f2067', 'f2068', 'f2069', 'f2070', 'f2071', 'f2072', 'f2073', 'f2074', 'f2075', 'f2076', 'f2077', 'f2078', 'f2079', 'f2080', 'f2081', 'f2082', 'f2083', 'f2084', 'f2085', 'f2086', 'f2087', 'f2088', 'f2089', 'f2090', 'f2091', 'f2092', 'f2093', 'f2094', 'f2095', 'f2096', 'f2097', 'f2098', 'f2099', 'f2100', 'f2101', 'f2102', 'f2103', 'f2104', 'f2105', 'f2106', 'f2107', 'f2108', 'f2109', 'f2110', 'f2111', 'f2112', 'f2113', 'f2114', 'f2115', 'f2116', 'f2117', 
'f2118', 'f2119', 'f2120', 'f2121', 'f2122', 'f2123', 'f2124', 'f2125', 'f2126', 'f2127', 'f2128', 'f2129', 'f2130', 'f2131', 'f2132', 'f2133', 'f2134', 'f2135', 'f2136', 'f2137', 'f2138', 'f2139', 'f2140', 'f2141', 'f2142', 'f2143', 'f2144', 'f2145', 'f2146', 'f2147', 'f2148', 'f2149', 'f2150', 'f2151', 'f2152', 'f2153', 'f2154', 'f2155', 'f2156', 'f2157', 'f2158', 'f2159', 'f2160', 'f2161', 'f2162', 'f2163', 'f2164', 'f2165', 'f2166', 'f2167', 'f2168', 'f2169', 'f2170', 'f2171', 'f2172', 'f2173', 'f2174', 'f2175', 'f2176', 'f2177', 'f2178', 'f2179', 'f2180', 'f2181', 'f2182', 'f2183', 'f2184', 'f2185', 'f2186', 'f2187', 'f2188', 'f2189', 'f2190', 'f2191', 'f2192', 'f2193', 'f2194', 'f2195', 'f2196', 'f2197', 'f2198', 'f2199', 'f2200', 'f2201', 'f2202', 'f2203', 'f2204', 'f2205', 'f2206', 'f2207', 'f2208', 'f2209', 'f2210', 'f2211', 'f2212', 'f2213', 'f2214', 'f2215', 'f2216', 'f2217', 'f2218', 'f2219', 'f2220', 'f2221', 'f2222', 'f2223', 'f2224', 'f2225', 'f2226', 'f2227', 'f2228', 'f2229', 'f2230', 'f2231', 'f2232', 'f2233', 'f2234', 'f2235', 'f2236', 'f2237', 'f2238', 'f2239', 'f2240', 'f2241', 'f2242', 'f2243', 'f2244', 'f2245', 'f2246', 'f2247', 'f2248', 'f2249', 'f2250', 'f2251', 'f2252', 'f2253', 'f2254', 'f2255', 'f2256', 'f2257', 'f2258', 'f2259', 'f2260', 'f2261', 'f2262', 'f2263', 'f2264', 'f2265', 'f2266', 'f2267', 'f2268', 'f2269', 'f2270', 'f2271', 'f2272', 'f2273', 'f2274', 'f2275', 'f2276', 'f2277', 'f2278', 'f2279', 'f2280', 'f2281', 'f2282', 'f2283', 'f2284', 'f2285', 'f2286', 'f2287', 'f2288', 'f2289', 'f2290', 'f2291', 'f2292', 'f2293', 'f2294', 'f2295', 'f2296', 'f2297', 'f2298', 'f2299', 'f2300', 'f2301', 'f2302', 'f2303', 'f2304', 'f2305', 'f2306', 'f2307', 'f2308', 'f2309', 'f2310', 'f2311', 'f2312', 'f2313', 'f2314', 'f2315', 'f2316', 'f2317', 'f2318', 'f2319', 'f2320', 'f2321', 'f2322', 'f2323', 'f2324', 'f2325', 'f2326', 'f2327', 'f2328', 'f2329', 'f2330', 'f2331', 'f2332', 'f2333', 'f2334', 'f2335', 'f2336', 'f2337', 'f2338', 'f2339', 
'f2340', 'f2341', 'f2342', 'f2343', 'f2344', 'f2345', 'f2346', 'f2347', 'f2348', 'f2349', 'f2350', 'f2351', 'f2352', 'f2353', 'f2354', 'f2355', 'f2356', 'f2357', 'f2358', 'f2359', 'f2360', 'f2361', 'f2362', 'f2363', 'f2364', 'f2365', 'f2366', 'f2367', 'f2368', 'f2369', 'f2370', 'f2371', 'f2372', 'f2373', 'f2374', 'f2375', 'f2376', 'f2377', 'f2378', 'f2379', 'f2380', 'f2381', 'f2382', 'f2383', 'f2384', 'f2385', 'f2386', 'f2387', 'f2388', 'f2389', 'f2390', 'f2391', 'f2392', 'f2393', 'f2394', 'f2395', 'f2396', 'f2397'] ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41', 'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54', 'f55', 'f56', 'f57', 'f58', 'f59', 'f60', 'f61', 'f62', 'f63', 'f64', 'f65', 'f66', 'f67', 'f68', 'f69', 'f70', 'f71', 'f72', 'f73', 'f74', 'f75', 'f76', 'f77', 'f78', 'f79', 'f80', 'f81', 'f82', 'f83', 'f84', 'f85', 'f86', 'f87', 'f88', 'f89', 'f90', 'f91', 'f92', 'f93', 'f94', 'f95', 'f96', 'f97', 'f98', 'f99', 'f100', 'f101', 'f102', 'f103', 'f104', 'f105', 'f106', 'f107', 'f108', 'f109', 'f110', 'f111', 'f112', 'f113', 'f114', 'f115', 'f116', 'f117', 'f118', 'f119', 'f120', 'f121', 'f122', 'f123', 'f124', 'f125', 'f126', 'f127', 'f128', 'f129', 'f130', 'f131', 'f132', 'f133', 'f134', 'f135', 'f136', 'f137', 'f138', 'f139', 'f140', 'f141', 'f142', 'f143', 'f144', 'f145', 'f146', 'f147', 'f148', 'f149', 'f150', 'f151', 'f152', 'f153', 'f154', 'f155', 'f156', 'f157', 'f158', 'f159', 'f160', 'f161', 'f162', 'f163', 'f164', 'f165', 'f166', 'f167', 'f168', 'f169', 'f170', 'f171', 'f172', 'f173', 'f174', 'f175', 'f176', 'f177', 'f178', 'f179', 'f180', 'f181', 'f182', 'f183', 'f184', 'f185', 'f186', 'f187', 'f188', 'f189', 'f190', 'f191', 'f192', 'f193', 'f194', 'f195', 'f196', 'f197', 
'f198', 'f199', 'f200', 'f201', 'f202', 'f203', 'f204', 'f205', 'f206', 'f207', 'f208', 'f209', 'f210', 'f211', 'f212', 'f213', 'f214', 'f215', 'f216', 'f217', 'f218', 'f219', 'f220', 'f221', 'f222', 'f223', 'f224', 'f225', 'f226', 'f227', 'f228', 'f229', 'f230', 'f231', 'f232', 'f233', 'f234', 'f235', 'f236', 'f237', 'f238', 'f239', 'f240', 'f241', 'f242', 'f243', 'f244', 'f245', 'f246', 'f247', 'f248', 'f249', 'f250', 'f251', 'f252', 'f253', 'f254', 'f255', 'f256', 'f257', 'f258', 'f259', 'f260', 'f261', 'f262', 'f263', 'f264', 'f265', 'f266', 'f267', 'f268', 'f269', 'f270', 'f271', 'f272', 'f273', 'f274', 'f275', 'f276', 'f277', 'f278', 'f279', 'f280', 'f281', 'f282', 'f283', 'f284', 'f285', 'f286', 'f287', 'f288', 'f289', 'f290', 'f291', 'f292', 'f293', 'f294', 'f295', 'f296', 'f297', 'f298', 'f299', 'f300', 'f301', 'f302', 'f303', 'f304', 'f305', 'f306', 'f307', 'f308', 'f309', 'f310', 'f311', 'f312', 'f313', 'f314', 'f315', 'f316', 'f317', 'f318', 'f319', 'f320', 'f321', 'f322', 'f323', 'f324', 'f325', 'f326', 'f327', 'f328', 'f329', 'f330', 'f331', 'f332', 'f333', 'f334', 'f335', 'f336', 'f337', 'f338', 'f339', 'f340', 'f341', 'f342', 'f343', 'f344', 'f345', 'f346', 'f347', 'f348', 'f349', 'f350', 'f351', 'f352', 'f353', 'f354', 'f355', 'f356', 'f357', 'f358', 'f359', 'f360', 'f361', 'f362', 'f363', 'f364', 'f365', 'f366', 'f367', 'f368', 'f369', 'f370', 'f371', 'f372', 'f373', 'f374', 'f375', 'f376', 'f377', 'f378', 'f379', 'f380', 'f381', 'f382', 'f383', 'f384', 'f385', 'f386', 'f387', 'f388', 'f389', 'f390', 'f391', 'f392', 'f393', 'f394', 'f395', 'f396', 'f397', 'f398', 'f399', 'f400', 'f401', 'f402', 'f403', 'f404', 'f405', 'f406', 'f407', 'f408', 'f409', 'f410', 'f411', 'f412', 'f413', 'f414', 'f415', 'f416', 'f417', 'f418', 'f419', 'f420', 'f421', 'f422', 'f423', 'f424', 'f425', 'f426', 'f427', 'f428', 'f429', 'f430', 'f431', 'f432', 'f433', 'f434', 'f435', 'f436', 'f437', 'f438', 'f439', 'f440', 'f441', 'f442', 'f443', 'f444', 'f445', 'f446', 'f447', 
'f448', 'f449', 'f450', 'f451', 'f452', 'f453', 'f454', 'f455', 'f456', 'f457', 'f458', 'f459', 'f460', 'f461', 'f462', 'f463', 'f464', 'f465', 'f466', 'f467', 'f468', 'f469', 'f470', 'f471', 'f472', 'f473', 'f474', 'f475', 'f476', 'f477', 'f478', 'f479', 'f480', 'f481', 'f482', 'f483', 'f484', 'f485', 'f486', 'f487', 'f488', 'f489', 'f490', 'f491', 'f492', 'f493', 'f494', 'f495', 'f496', 'f497', 'f498', 'f499', 'f500', 'f501', 'f502', 'f503', 'f504', 'f505', 'f506', 'f507', 'f508', 'f509', 'f510', 'f511', 'f512', 'f513', 'f514', 'f515', 'f516', 'f517', 'f518', 'f519', 'f520', 'f521', 'f522', 'f523', 'f524', 'f525', 'f526', 'f527', 'f528', 'f529', 'f530', 'f531', 'f532', 'f533', 'f534', 'f535', 'f536', 'f537', 'f538', 'f539', 'f540', 'f541', 'f542', 'f543', 'f544', 'f545', 'f546', 'f547', 'f548', 'f549', 'f550', 'f551', 'f552', 'f553', 'f554', 'f555', 'f556', 'f557', 'f558', 'f559', 'f560', 'f561', 'f562', 'f563', 'f564', 'f565', 'f566', 'f567', 'f568', 'f569', 'f570', 'f571', 'f572', 'f573', 'f574', 'f575', 'f576', 'f577', 'f578', 'f579', 'f580', 'f581', 'f582', 'f583', 'f584', 'f585', 'f586', 'f587', 'f588', 'f589', 'f590', 'f591', 'f592', 'f593', 'f594', 'f595', 'f596', 'f597', 'f598', 'f599', 'f600', 'f601', 'f602', 'f603', 'f604', 'f605', 'f606', 'f607', 'f608', 'f609', 'f610', 'f611', 'f612', 'f613', 'f614', 'f615', 'f616', 'f617', 'f618', 'f619', 'f620', 'f621', 'f622', 'f623', 'f624', 'f625', 'f626', 'f627', 'f628', 'f629', 'f630', 'f631', 'f632', 'f633', 'f634', 'f635', 'f636', 'f637', 'f638', 'f639', 'f640', 'f641', 'f642', 'f643', 'f644', 'f645', 'f646', 'f647', 'f648', 'f649', 'f650', 'f651', 'f652', 'f653', 'f654', 'f655', 'f656', 'f657', 'f658', 'f659', 'f660', 'f661', 'f662', 'f663', 'f664', 'f665', 'f666', 'f667', 'f668', 'f669', 'f670', 'f671', 'f672', 'f673', 'f674', 'f675', 'f676', 'f677', 'f678', 'f679', 'f680', 'f681', 'f682', 'f683', 'f684', 'f685', 'f686', 'f687', 'f688', 'f689', 'f690', 'f691', 'f692', 'f693', 'f694', 'f695', 'f696', 'f697', 
'f698', 'f699', 'f700', 'f701', 'f702', 'f703', 'f704', 'f705', 'f706', 'f707', 'f708', 'f709', 'f710', 'f711', 'f712', 'f713', 'f714', 'f715', 'f716', 'f717', 'f718', 'f719', 'f720', 'f721', 'f722', 'f723', 'f724', 'f725', 'f726', 'f727', 'f728', 'f729', 'f730', 'f731', 'f732', 'f733', 'f734', 'f735', 'f736', 'f737', 'f738', 'f739', 'f740', 'f741', 'f742', 'f743', 'f744', 'f745', 'f746', 'f747', 'f748', 'f749', 'f750', 'f751', 'f752', 'f753', 'f754', 'f755', 'f756', 'f757', 'f758', 'f759', 'f760', 'f761', 'f762', 'f763', 'f764', 'f765', 'f766', 'f767', 'f768', 'f769', 'f770', 'f771', 'f772', 'f773', 'f774', 'f775', 'f776', 'f777', 'f778', 'f779', 'f780', 'f781', 'f782', 'f783', 'f784', 'f785', 'f786', 'f787', 'f788', 'f789', 'f790', 'f791', 'f792', 'f793', 'f794', 'f795', 'f796', 'f797', 'f798', 'f799', 'f800', 'f801', 'f802', 'f803', 'f804', 'f805', 'f806', 'f807', 'f808', 'f809', 'f810', 'f811', 'f812', 'f813', 'f814', 'f815', 'f816', 'f817', 'f818', 'f819', 'f820', 'f821', 'f822', 'f823', 'f824', 'f825', 'f826', 'f827', 'f828', 'f829', 'f830', 'f831', 'f832', 'f833', 'f834', 'f835', 'f836', 'f837', 'f838', 'f839', 'f840', 'f841', 'f842', 'f843', 'f844', 'f845', 'f846', 'f847', 'f848', 'f849', 'f850', 'f851', 'f852', 'f853', 'f854', 'f855', 'f856', 'f857', 'f858', 'f859', 'f860', 'f861', 'f862', 'f863', 'f864', 'f865', 'f866', 'f867', 'f868', 'f869', 'f870', 'f871', 'f872', 'f873', 'f874', 'f875', 'f876', 'f877', 'f878', 'f879', 'f880', 'f881', 'f882', 'f883', 'f884', 'f885', 'f886', 'f887', 'f888', 'f889', 'f890', 'f891', 'f892', 'f893', 'f894', 'f895', 'f896', 'f897', 'f898', 'f899', 'f900', 'f901', 'f902', 'f903', 'f904', 'f905', 'f906', 'f907', 'f908', 'f909', 'f910', 'f911', 'f912', 'f913', 'f914', 'f915', 'f916', 'f917', 'f918', 'f919', 'f920', 'f921', 'f922', 'f923', 'f924', 'f925', 'f926', 'f927', 'f928', 'f929', 'f930', 'f931', 'f932', 'f933', 'f934', 'f935', 'f936', 'f937', 'f938', 'f939', 'f940', 'f941', 'f942', 'f943', 'f944', 'f945', 'f946', 'f947', 
'f948', 'f949', 'f950', 'f951', 'f952', 'f953', 'f954', 'f955', 'f956', 'f957', 'f958', 'f959', 'f960', 'f961', 'f962', 'f963', 'f964', 'f965', 'f966', 'f967', 'f968', 'f969', 'f970', 'f971', 'f972', 'f973', 'f974', 'f975', 'f976', 'f977', 'f978', 'f979', 'f980', 'f981', 'f982', 'f983', 'f984', 'f985', 'f986', 'f987', 'f988', 'f989', 'f990', 'f991', 'f992', 'f993', 'f994', 'f995', 'f996', 'f997', 'f998', 'f999', 'f1000', 'f1001', 'f1002', 'f1003', 'f1004', 'f1005', 'f1006', 'f1007', 'f1008', 'f1009', 'f1010', 'f1011', 'f1012', 'f1013', 'f1014', 'f1015', 'f1016', 'f1017', 'f1018', 'f1019', 'f1020', 'f1021', 'f1022', 'f1023', 'f1024', 'f1025', 'f1026', 'f1027', 'f1028', 'f1029', 'f1030', 'f1031', 'f1032', 'f1033', 'f1034', 'f1035', 'f1036', 'f1037', 'f1038', 'f1039', 'f1040', 'f1041', 'f1042', 'f1043', 'f1044', 'f1045', 'f1046', 'f1047', 'f1048', 'f1049', 'f1050', 'f1051', 'f1052', 'f1053', 'f1054', 'f1055', 'f1056', 'f1057', 'f1058', 'f1059', 'f1060', 'f1061', 'f1062', 'f1063', 'f1064', 'f1065', 'f1066', 'f1067', 'f1068', 'f1069', 'f1070', 'f1071', 'f1072', 'f1073', 'f1074', 'f1075', 'f1076', 'f1077', 'f1078', 'f1079', 'f1080', 'f1081', 'f1082', 'f1083', 'f1084', 'f1085', 'f1086', 'f1087', 'f1088', 'f1089', 'f1090', 'f1091', 'f1092', 'f1093', 'f1094', 'f1095', 'f1096', 'f1097', 'f1098', 'f1099', 'f1100', 'f1101', 'f1102', 'f1103', 'f1104', 'f1105', 'f1106', 'f1107', 'f1108', 'f1109', 'f1110', 'f1111', 'f1112', 'f1113', 'f1114', 'f1115', 'f1116', 'f1117', 'f1118', 'f1119', 'f1120', 'f1121', 'f1122', 'f1123', 'f1124', 'f1125', 'f1126', 'f1127', 'f1128', 'f1129', 'f1130', 'f1131', 'f1132', 'f1133', 'f1134', 'f1135', 'f1136', 'f1137', 'f1138', 'f1139', 'f1140', 'f1141', 'f1142', 'f1143', 'f1144', 'f1145', 'f1146', 'f1147', 'f1148', 'f1149', 'f1150', 'f1151', 'f1152', 'f1153', 'f1154', 'f1155', 'f1156', 'f1157', 'f1158', 'f1159', 'f1160', 'f1161', 'f1162', 'f1163', 'f1164', 'f1165', 'f1166', 'f1167', 'f1168', 'f1169', 'f1170', 'f1171', 'f1172', 'f1173', 'f1174', 'f1175', 
'f1176', 'f1177', 'f1178', 'f1179', 'f1180', 'f1181', 'f1182', 'f1183', 'f1184', 'f1185', 'f1186', 'f1187', 'f1188', 'f1189', 'f1190', 'f1191', 'f1192', 'f1193', 'f1194', 'f1195', 'f1196', 'f1197', 'f1198', 'f1199', 'f1200', 'f1201', 'f1202', 'f1203', 'f1204', 'f1205', 'f1206', 'f1207', 'f1208', 'f1209', 'f1210', 'f1211', 'f1212', 'f1213', 'f1214', 'f1215', 'f1216', 'f1217', 'f1218', 'f1219', 'f1220', 'f1221', 'f1222', 'f1223', 'f1224', 'f1225', 'f1226', 'f1227', 'f1228', 'f1229', 'f1230', 'f1231', 'f1232', 'f1233', 'f1234', 'f1235', 'f1236', 'f1237', 'f1238', 'f1239', 'f1240', 'f1241', 'f1242', 'f1243', 'f1244', 'f1245', 'f1246', 'f1247', 'f1248', 'f1249', 'f1250', 'f1251', 'f1252', 'f1253', 'f1254', 'f1255', 'f1256', 'f1257', 'f1258', 'f1259', 'f1260', 'f1261', 'f1262', 'f1263', 'f1264', 'f1265', 'f1266', 'f1267', 'f1268', 'f1269', 'f1270', 'f1271', 'f1272', 'f1273', 'f1274', 'f1275', 'f1276', 'f1277', 'f1278', 'f1279', 'f1280', 'f1281', 'f1282', 'f1283', 'f1284', 'f1285', 'f1286', 'f1287', 'f1288', 'f1289', 'f1290', 'f1291', 'f1292', 'f1293', 'f1294', 'f1295', 'f1296', 'f1297', 'f1298', 'f1299', 'f1300', 'f1301', 'f1302', 'f1303', 'f1304', 'f1305', 'f1306', 'f1307', 'f1308', 'f1309', 'f1310', 'f1311', 'f1312', 'f1313', 'f1314', 'f1315', 'f1316', 'f1317', 'f1318', 'f1319', 'f1320', 'f1321', 'f1322', 'f1323', 'f1324', 'f1325', 'f1326', 'f1327', 'f1328', 'f1329', 'f1330', 'f1331', 'f1332', 'f1333', 'f1334', 'f1335', 'f1336', 'f1337', 'f1338', 'f1339', 'f1340', 'f1341', 'f1342', 'f1343', 'f1344', 'f1345', 'f1346', 'f1347', 'f1348', 'f1349', 'f1350', 'f1351', 'f1352', 'f1353', 'f1354', 'f1355', 'f1356', 'f1357', 'f1358', 'f1359', 'f1360', 'f1361', 'f1362', 'f1363', 'f1364', 'f1365', 'f1366', 'f1367', 'f1368', 'f1369', 'f1370', 'f1371', 'f1372', 'f1373', 'f1374', 'f1375', 'f1376', 'f1377', 'f1378', 'f1379', 'f1380', 'f1381', 'f1382', 'f1383', 'f1384', 'f1385', 'f1386', 'f1387', 'f1388', 'f1389', 'f1390', 'f1391', 'f1392', 'f1393', 'f1394', 'f1395', 'f1396', 'f1397', 
'f1398', 'f1399', 'f1400', 'f1401', 'f1402', 'f1403', 'f1404', 'f1405', 'f1406', 'f1407', 'f1408', 'f1409', 'f1410', 'f1411', 'f1412', 'f1413', 'f1414', 'f1415', 'f1416', 'f1417', 'f1418', 'f1419', 'f1420', 'f1421', 'f1422', 'f1423', 'f1424', 'f1425', 'f1426', 'f1427', 'f1428', 'f1429', 'f1430', 'f1431', 'f1432', 'f1433', 'f1434', 'f1435', 'f1436', 'f1437', 'f1438', 'f1439', 'f1440', 'f1441', 'f1442', 'f1443', 'f1444', 'f1445', 'f1446', 'f1447', 'f1448', 'f1449', 'f1450', 'f1451', 'f1452', 'f1453', 'f1454', 'f1455', 'f1456', 'f1457', 'f1458', 'f1459', 'f1460', 'f1461', 'f1462', 'f1463', 'f1464', 'f1465', 'f1466', 'f1467', 'f1468', 'f1469', 'f1470', 'f1471', 'f1472', 'f1473', 'f1474', 'f1475', 'f1476', 'f1477', 'f1478', 'f1479', 'f1480', 'f1481', 'f1482', 'f1483', 'f1484', 'f1485', 'f1486', 'f1487', 'f1488', 'f1489', 'f1490', 'f1491', 'f1492', 'f1493', 'f1494', 'f1495', 'f1496', 'f1497', 'f1498', 'f1499', 'f1500', 'f1501', 'f1502', 'f1503', 'f1504', 'f1505', 'f1506', 'f1507', 'f1508', 'f1509', 'f1510', 'f1511', 'f1512', 'f1513', 'f1514', 'f1515', 'f1516', 'f1517', 'f1518', 'f1519', 'f1520', 'f1521', 'f1522', 'f1523', 'f1524', 'f1525', 'f1526', 'f1527', 'f1528', 'f1529', 'f1530', 'f1531', 'f1532', 'f1533', 'f1534', 'f1535', 'f1536', 'f1537', 'f1538', 'f1539', 'f1540', 'f1541', 'f1542', 'f1543', 'f1544', 'f1545', 'f1546', 'f1547', 'f1548', 'f1549', 'f1550', 'f1551', 'f1552', 'f1553', 'f1554', 'f1555', 'f1556', 'f1557', 'f1558', 'f1559', 'f1560', 'f1561', 'f1562', 'f1563', 'f1564', 'f1565', 'f1566', 'f1567', 'f1568', 'f1569', 'f1570', 'f1571', 'f1572', 'f1573', 'f1574', 'f1575', 'f1576', 'f1577', 'f1578', 'f1579', 'f1580', 'f1581', 'f1582', 'f1583', 'f1584', 'f1585', 'f1586', 'f1587', 'f1588', 'f1589', 'f1590', 'f1591', 'f1592', 'f1593', 'f1594', 'f1595', 'f1596', 'f1597', 'f1598', 'f1599', 'f1600', 'f1601', 'f1602', 'f1603', 'f1604', 'f1605', 'f1606', 'f1607', 'f1608', 'f1609', 'f1610', 'f1611', 'f1612', 'f1613', 'f1614', 'f1615', 'f1616', 'f1617', 'f1618', 'f1619', 
'f1620', 'f1621', 'f1622', 'f1623', 'f1624', 'f1625', 'f1626', 'f1627', 'f1628', 'f1629', 'f1630', 'f1631', 'f1632', 'f1633', 'f1634', 'f1635', 'f1636', 'f1637', 'f1638', 'f1639', 'f1640', 'f1641', 'f1642', 'f1643', 'f1644', 'f1645', 'f1646', 'f1647', 'f1648', 'f1649', 'f1650', 'f1651', 'f1652', 'f1653', 'f1654', 'f1655', 'f1656', 'f1657', 'f1658', 'f1659', 'f1660', 'f1661', 'f1662', 'f1663', 'f1664', 'f1665', 'f1666', 'f1667', 'f1668', 'f1669', 'f1670', 'f1671', 'f1672', 'f1673', 'f1674', 'f1675', 'f1676', 'f1677', 'f1678', 'f1679', 'f1680', 'f1681', 'f1682', 'f1683', 'f1684', 'f1685', 'f1686', 'f1687', 'f1688', 'f1689', 'f1690', 'f1691', 'f1692', 'f1693', 'f1694', 'f1695', 'f1696', 'f1697', 'f1698', 'f1699', 'f1700', 'f1701', 'f1702', 'f1703', 'f1704', 'f1705', 'f1706', 'f1707', 'f1708', 'f1709', 'f1710', 'f1711', 'f1712', 'f1713', 'f1714', 'f1715', 'f1716', 'f1717', 'f1718', 'f1719', 'f1720', 'f1721', 'f1722', 'f1723', 'f1724', 'f1725', 'f1726', 'f1727', 'f1728', 'f1729', 'f1730', 'f1731', 'f1732', 'f1733', 'f1734', 'f1735', 'f1736', 'f1737', 'f1738', 'f1739', 'f1740', 'f1741', 'f1742', 'f1743', 'f1744', 'f1745', 'f1746', 'f1747', 'f1748', 'f1749', 'f1750', 'f1751', 'f1752', 'f1753', 'f1754', 'f1755', 'f1756', 'f1757', 'f1758', 'f1759', 'f1760', 'f1761', 'f1762', 'f1763', 'f1764', 'f1765', 'f1766', 'f1767', 'f1768', 'f1769', 'f1770', 'f1771', 'f1772', 'f1773', 'f1774', 'f1775', 'f1776', 'f1777', 'f1778', 'f1779', 'f1780', 'f1781', 'f1782', 'f1783', 'f1784', 'f1785', 'f1786', 'f1787', 'f1788', 'f1789', 'f1790', 'f1791', 'f1792', 'f1793', 'f1794', 'f1795', 'f1796', 'f1797', 'f1798', 'f1799', 'f1800', 'f1801', 'f1802', 'f1803', 'f1804', 'f1805', 'f1806', 'f1807', 'f1808', 'f1809', 'f1810', 'f1811', 'f1812', 'f1813', 'f1814', 'f1815', 'f1816', 'f1817', 'f1818', 'f1819', 'f1820', 'f1821', 'f1822', 'f1823', 'f1824', 'f1825', 'f1826', 'f1827', 'f1828', 'f1829', 'f1830', 'f1831', 'f1832', 'f1833', 'f1834', 'f1835', 'f1836', 'f1837', 'f1838', 'f1839', 'f1840', 'f1841', 
'f1842', 'f1843', 'f1844', 'f1845', 'f1846', 'f1847', 'f1848', 'f1849', 'f1850', 'f1851', 'f1852', 'f1853', 'f1854', 'f1855', 'f1856', 'f1857', 'f1858', 'f1859', 'f1860', 'f1861', 'f1862', 'f1863', 'f1864', 'f1865', 'f1866', 'f1867', 'f1868', 'f1869', 'f1870', 'f1871', 'f1872', 'f1873', 'f1874', 'f1875', 'f1876', 'f1877', 'f1878', 'f1879', 'f1880', 'f1881', 'f1882', 'f1883', 'f1884', 'f1885', 'f1886', 'f1887', 'f1888', 'f1889', 'f1890', 'f1891', 'f1892', 'f1893', 'f1894', 'f1895', 'f1896', 'f1897', 'f1898', 'f1899', 'f1900', 'f1901', 'f1902', 'f1903', 'f1904', 'f1905', 'f1906', 'f1907', 'f1908', 'f1909', 'f1910', 'f1911', 'f1912', 'f1913', 'f1914', 'f1915', 'f1916', 'f1917', 'f1918', 'f1919', 'f1920', 'f1921', 'f1922', 'f1923', 'f1924', 'f1925', 'f1926', 'f1927', 'f1928', 'f1929', 'f1930', 'f1931', 'f1932', 'f1933', 'f1934', 'f1935', 'f1936', 'f1937', 'f1938', 'f1939', 'f1940', 'f1941', 'f1942', 'f1943', 'f1944', 'f1945', 'f1946', 'f1947', 'f1948', 'f1949', 'f1950', 'f1951', 'f1952', 'f1953', 'f1954', 'f1955', 'f1956', 'f1957', 'f1958', 'f1959', 'f1960', 'f1961', 'f1962', 'f1963', 'f1964', 'f1965', 'f1966', 'f1967', 'f1968', 'f1969', 'f1970', 'f1971', 'f1972', 'f1973', 'f1974', 'f1975', 'f1976', 'f1977', 'f1978', 'f1979', 'f1980', 'f1981', 'f1982', 'f1983', 'f1984', 'f1985', 'f1986', 'f1987', 'f1988', 'f1989', 'f1990', 'f1991', 'f1992', 'f1993', 'f1994', 'f1995', 'f1996', 'f1997', 'f1998', 'f1999', 'f2000', 'f2001', 'f2002', 'f2003', 'f2004', 'f2005', 'f2006', 'f2007', 'f2008', 'f2009', 'f2010', 'f2011', 'f2012', 'f2013', 'f2014', 'f2015', 'f2016', 'f2017', 'f2018', 'f2019', 'f2020', 'f2021', 'f2022', 'f2023', 'f2024', 'f2025', 'f2026', 'f2027', 'f2028', 'f2029', 'f2030', 'f2031', 'f2032', 'f2033', 'f2034', 'f2035', 'f2036', 'f2037', 'f2038', 'f2039', 'f2040', 'f2041', 'f2042', 'f2043', 'f2044', 'f2045', 'f2046', 'f2047', 'f2048', 'f2049', 'f2050', 'f2051', 'f2052', 'f2053', 'f2054', 'f2055', 'f2056', 'f2057', 'f2058', 'f2059', 'f2060', 'f2061', 'f2062', 'f2063', 
'f2064', 'f2065', 'f2066', 'f2067', 'f2068', 'f2069', 'f2070', 'f2071', 'f2072', 'f2073', 'f2074', 'f2075', 'f2076', 'f2077', 'f2078', 'f2079', 'f2080', 'f2081', 'f2082', 'f2083', 'f2084', 'f2085', 'f2086', 'f2087', 'f2088', 'f2089', 'f2090', 'f2091', 'f2092', 'f2093', 'f2094', 'f2095', 'f2096', 'f2097', 'f2098', 'f2099', 'f2100', 'f2101', 'f2102', 'f2103', 'f2104', 'f2105', 'f2106', 'f2107', 'f2108', 'f2109', 'f2110', 'f2111', 'f2112', 'f2113', 'f2114', 'f2115', 'f2116', 'f2117', 'f2118', 'f2119', 'f2120', 'f2121', 'f2122', 'f2123', 'f2124', 'f2125', 'f2126', 'f2127', 'f2128', 'f2129', 'f2130', 'f2131', 'f2132', 'f2133', 'f2134', 'f2135', 'f2136', 'f2137', 'f2138', 'f2139', 'f2140', 'f2141', 'f2142', 'f2143', 'f2144', 'f2145', 'f2146', 'f2147', 'f2148', 'f2149', 'f2150', 'f2151', 'f2152', 'f2153', 'f2154', 'f2155', 'f2156', 'f2157', 'f2158', 'f2159', 'f2160', 'f2161', 'f2162', 'f2163', 'f2164', 'f2165', 'f2166', 'f2167', 'f2168', 'f2169', 'f2170', 'f2171', 'f2172', 'f2173', 'f2174', 'f2175', 'f2176', 'f2177', 'f2178', 'f2179', 'f2180', 'f2181', 'f2182', 'f2183', 'f2184', 'f2185', 'f2186', 'f2187', 'f2188', 'f2189', 'f2190', 'f2191', 'f2192', 'f2193', 'f2194', 'f2195', 'f2196', 'f2197', 'f2198', 'f2199', 'f2200', 'f2201', 'f2202', 'f2203', 'f2204', 'f2205', 'f2206', 'f2207', 'f2208', 'f2209', 'f2210', 'f2211', 'f2212', 'f2213', 'f2214', 'f2215', 'f2216', 'f2217', 'f2218', 'f2219', 'f2220', 'f2221', 'f2222', 'f2223', 'f2224', 'f2225', 'f2226', 'f2227', 'f2228', 'f2229', 'f2230', 'f2231', 'f2232', 'f2233', 'f2234', 'f2235', 'f2236', 'f2237', 'f2238', 'f2239', 'f2240', 'f2241', 'f2242', 'f2243', 'f2244', 'f2245', 'f2246', 'f2247', 'f2248', 'f2249', 'f2250', 'f2251', 'f2252', 'f2253', 'f2254', 'f2255', 'f2256', 'f2257', 'f2258', 'f2259', 'f2260', 'f2261', 'f2262', 'f2263', 'f2264', 'f2265', 'f2266', 'f2267', 'f2268', 'f2269', 'f2270', 'f2271', 'f2272', 'f2273', 'f2274', 'f2275', 'f2276', 'f2277', 'f2278', 'f2279', 'f2280', 'f2281', 'f2282', 'f2283', 'f2284', 'f2285', 
'f2286', 'f2287', 'f2288', 'f2289', 'f2290', 'f2291', 'f2292', 'f2293', 'f2294', 'f2295', 'f2296', 'f2297', 'f2298', 'f2299', 'f2300', 'f2301', 'f2302', 'f2303', 'f2304', 'f2305', 'f2306', 'f2307', 'f2308', 'f2309', 'f2310', 'f2311', 'f2312', 'f2313', 'f2314', 'f2315', 'f2316', 'f2317', 'f2318', 'f2319', 'f2320', 'f2321', 'f2322', 'f2323', 'f2324', 'f2325', 'f2326', 'f2327', 'f2328', 'f2329', 'f2330', 'f2331', 'f2332', 'f2333', 'f2334', 'f2335', 'f2336', 'f2337', 'f2338', 'f2339', 'f2340', 'f2341', 'f2342', 'f2343', 'f2344', 'f2345', 'f2346', 'f2347', 'f2348', 'f2349', 'f2350', 'f2351', 'f2352', 'f2353', 'f2354', 'f2355', 'f2356', 'f2357', 'f2358', 'f2359', 'f2360', 'f2361', 'f2362', 'f2363', 'f2364', 'f2365', 'f2366', 'f2367', 'f2368', 'f2369', 'f2370', 'f2371', 'f2372', 'f2373', 'f2374', 'f2375']\n",
"expected f2394, f2379, f2382, f2377, f2389, f2396, f2397, f2386, f2392, f2381, f2393, f2388, f2390, f2378, f2383, f2387, f2385, f2380, f2391, f2384, f2376, f2395 in input data\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:55:32,259:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 140. configuration. Duration: 4.101027; loss: 2.000000; status 3; additional run info: \n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run15\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:55:32,332:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:32,407:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:32,560:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 114 training points for SMAC.\n",
"[INFO] [2016-08-16 07:55:34,882:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.377049 14: 0.377049 15: 0.377049 16: 0.377049 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.381148 32: 0.385246 33: 0.385246 34: 0.385246 35: 0.385246 36: 0.389344 37: 0.389344 38: 0.389344 39: 0.389344 40: 0.385246 41: 0.385246 42: 0.385246 43: 0.385246 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [25, 11, 18, 4, 11, 11, 2, 2, 2, 2, 2, 2, 2, 11, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 24, 2, 2, 2, 2, 2, 7, 7, 11, 2, 2, 2, 2, 2, 2, 2, 11, 2, 2, 2, 11, 2, 2, 2, 2, 2]\n",
"\tWeights: [ 0. 0. 0.72 0. 0.02 0. 0. 0.04 0. 0. 0. 0.16\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0.\n",
" 0.02 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 21) (1, 31) (1, 40) (1, 63) (1, 73) (1, 79)\n",
"[INFO] [2016-08-16 07:55:34,891:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:55:34,894:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.566944 seconds\n",
"[INFO] [2016-08-16 07:55:34,896:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:55:55,631:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 23.0681 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:55:55,640:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 141. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:55,642:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.102161965068\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:56,816:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 141. configuration. Duration: 1.099840; loss: 0.659836; status 1; additional run info: ;duration: 1.099839687347412;num_run:00141 \n",
"[INFO] [2016-08-16 07:55:56,827:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 142. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:56,830:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.45719278424\n",
" classifier:extra_trees:min_samples_leaf, Value: 16\n",
" classifier:extra_trees:min_samples_split, Value: 3\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 17\n",
" preprocessor:gem:precond, Value: 0.436169002639\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run15\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:55:57,042:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:55:57,207:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:55:57,370:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 142. configuration. Duration: 0.478962; loss: 0.721311; status 1; additional run info: ;duration: 0.4789619445800781;num_run:00142 \n",
"[INFO] [2016-08-16 07:55:57,381:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 143. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:57,384:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 6\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:58,396:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 143. configuration. Duration: 0.922374; loss: 0.651639; status 1; additional run info: ;duration: 0.9223735332489014;num_run:00143 \n",
"[INFO] [2016-08-16 07:55:58,407:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 144. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:58,409:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 0.0673868626678\n",
" classifier:bernoulli_nb:fit_prior, Value: True\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000108475751982\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 10\n",
" preprocessor:gem:precond, Value: 0.0268437276948\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:58,560:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 144. configuration. Duration: 0.054932; loss: 0.778689; status 1; additional run info: ;duration: 0.054931640625;num_run:00144 \n",
"[INFO] [2016-08-16 07:55:58,572:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 145. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:58,575:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 6\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.345646237116\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:59,494:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 145. configuration. Duration: 0.846189; loss: 0.651639; status 1; additional run info: ;duration: 0.8461892604827881;num_run:00145 \n",
"[INFO] [2016-08-16 07:55:59,505:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 146. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:59,507:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 1.00093316056e-05\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.0326282800531\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: invscaling\n",
" classifier:sgd:loss, Value: perceptron\n",
" classifier:sgd:n_iter, Value: 6\n",
" classifier:sgd:penalty, Value: l2\n",
" classifier:sgd:power_t, Value: 0.386306851624\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0420798829914\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:55:59,594:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 146. configuration. Duration: 0.038296; loss: 0.930328; status 1; additional run info: ;duration: 0.03829646110534668;num_run:00146 \n",
"[INFO] [2016-08-16 07:55:59,605:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 147. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:55:59,608:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.1\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 50\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 10\n",
" preprocessor:gem:precond, Value: 0.1\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:56:00,073:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 147. configuration. Duration: 0.401770; loss: 0.692623; status 1; additional run info: ;duration: 0.40176963806152344;num_run:00147 \n",
"[INFO] [2016-08-16 07:56:00,086:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 148. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:00,089:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 5.38617674192\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0335017435815\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: nystroem_sampler\n",
" preprocessor:nystroem_sampler:coef0, Value: -0.56547115424\n",
" preprocessor:nystroem_sampler:gamma, Value: 5.66906760778\n",
" preprocessor:nystroem_sampler:kernel, Value: sigmoid\n",
" preprocessor:nystroem_sampler:n_components, Value: 2150\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/kernel_approximation.py:463: UserWarning: n_components > n_samples. This is not possible.\n",
"n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n",
" warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:00,997:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.385246 7: 0.381148 8: 0.381148 9: 0.377049 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.377049 14: 0.377049 15: 0.377049 16: 0.377049 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.372951 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.381148 32: 0.385246 33: 0.385246 34: 0.385246 35: 0.385246 36: 0.389344 37: 0.389344 38: 0.389344 39: 0.389344 40: 0.385246 41: 0.385246 42: 0.385246 43: 0.385246 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [24, 10, 17, 3, 10, 10, 1, 1, 1, 1, 1, 1, 1, 10, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 23, 1, 1, 1, 1, 1, 6, 6, 10, 1, 1, 1, 1, 1, 1, 1, 10, 1, 1, 1, 10, 1, 1, 1, 1, 1]\n",
"\tWeights: [ 0. 0.72 0. 0.02 0. 0. 0.04 0. 0. 0. 0.16 0. 0.\n",
" 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0.02 0.02\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 14) (1, 21) (1, 31) (1, 40) (1, 63) (1, 73) (1, 79)\n",
"[INFO] [2016-08-16 07:56:01,020:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:56:01,053:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 4.020052 seconds\n",
"[INFO] [2016-08-16 07:56:01,063:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:01,916:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 148. configuration. Duration: 1.652377; loss: 0.856557; status 1; additional run info: ;duration: 1.652376651763916;num_run:00148 \n",
"[INFO] [2016-08-16 07:56:01,937:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 149. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:01,943:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.1\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 50\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.193390083915\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:56:02,298:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 149. configuration. Duration: 0.263699; loss: 0.663934; status 1; additional run info: ;duration: 0.2636992931365967;num_run:00149 \n",
"[INFO] [2016-08-16 07:56:02,316:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 150. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:02,320:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 3.23971584618\n",
" classifier:bernoulli_nb:fit_prior, Value: True\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.292882349526\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.350242476976\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:02,410:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 150. configuration. Duration: 0.031575; loss: 0.872951; status 1; additional run info: ;duration: 0.03157520294189453;num_run:00150 \n",
"[INFO] [2016-08-16 07:56:02,424:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 151. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:02,429:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.1\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 50\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00031942807827\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: ward\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 25\n",
" preprocessor:feature_agglomeration:pooling_func, Value: mean\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:02,680:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 151. configuration. Duration: 0.184323; loss: 0.663934; status 1; additional run info: ;duration: 0.18432283401489258;num_run:00151 \n",
"[INFO] [2016-08-16 07:56:02,694:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 152. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:02,698:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 7.7447270459\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 65.4871140355\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:02,795:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 152. configuration. Duration: 0.037527; loss: 0.831967; status 1; additional run info: ;duration: 0.03752732276916504;num_run:00152 \n",
"[INFO] [2016-08-16 07:56:02,807:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 153. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:02,810:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.1\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 50\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.099038041764\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: ward\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 25\n",
" preprocessor:feature_agglomeration:pooling_func, Value: mean\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:03,077:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 153. configuration. Duration: 0.200906; loss: 0.663934; status 1; additional run info: ;duration: 0.20090603828430176;num_run:00153 \n",
"[INFO] [2016-08-16 07:56:03,092:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 154. configuration (from SMAC) with time limit 360s.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run15\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:03,096:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 4.79774405878\n",
" classifier:extra_trees:min_samples_leaf, Value: 11\n",
" classifier:extra_trees:min_samples_split, Value: 18\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000111418221433\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:gamma, Value: 0.0191105526929\n",
" preprocessor:kernel_pca:kernel, Value: rbf\n",
" preprocessor:kernel_pca:n_components, Value: 106\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[ERROR] [2016-08-16 07:56:03,109:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:03,261:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:04,242:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 154. configuration. Duration: 0.999073; loss: 0.750000; status 1; additional run info: ;duration: 0.9990730285644531;num_run:00154 \n",
"[INFO] [2016-08-16 07:56:04,255:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 155. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:04,260:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.0715608529004\n",
" classifier:adaboost:max_depth, Value: 10\n",
" classifier:adaboost:n_estimators, Value: 444\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 388\n",
" preprocessor:feature_agglomeration:pooling_func, Value: mean\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:07,853:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 155. configuration. Duration: 3.438564; loss: 0.774590; status 1; additional run info: ;duration: 3.438563823699951;num_run:00155 \n",
"[INFO] [2016-08-16 07:56:07,868:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 156. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:07,871:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 0.301538668998\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 2\n",
" classifier:decision_tree:min_samples_split, Value: 12\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000275256352782\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 258.969125453\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.000234836662375\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:56:08,048:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.381148 7: 0.377049 8: 0.377049 9: 0.381148 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.377049 14: 0.377049 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.385246 19: 0.381148 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.385246 28: 0.385246 29: 0.385246 30: 0.385246 31: 0.385246 32: 0.385246 33: 0.381148 34: 0.385246 35: 0.385246 36: 0.385246 37: 0.385246 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.377049 47: 0.377049 48: 0.381148 49: 0.381148\n",
"\tMembers: [21, 7, 14, 2, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 2, 3, 6, 2, 6, 7, 7, 3, 2, 1, 14, 7, 7, 7, 7, 7, 20, 47, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 3, 2, 6, 7]\n",
"\tWeights: [ 0. 0.02 0.1 0.06 0. 0. 0.1 0.62 0. 0. 0. 0. 0.\n",
" 0. 0.04 0. 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. ]\n",
"\tIdentifiers: (1, 19) (1, 21) (1, 31) (1, 39) (1, 40) (1, 63) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:56:08,060:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:56:08,064:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 4.965663 seconds\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:08,071:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (148)!.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:08,074:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (148)!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:56:08,097:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:56:08,133:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 156. configuration. Duration: 0.201516; loss: 0.745902; status 1; additional run info: ;duration: 0.20151591300964355;num_run:00156 \n",
"[INFO] [2016-08-16 07:56:08,146:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 157. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:08,152:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.1\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 50\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0822641305677\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:n_components, Value: 457\n",
" preprocessor:fast_ica:whiten, Value: True\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"n_components is too large: it will be set to 9\n",
"[ERROR] [2016-08-16 07:56:08,231:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:08,511:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 157. configuration. Duration: 0.289022; loss: 0.709016; status 1; additional run info: ;duration: 0.28902196884155273;num_run:00157 \n",
"[INFO] [2016-08-16 07:56:08,525:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 158. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:08,529:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 2.31872587345\n",
" classifier:bernoulli_nb:fit_prior, Value: True\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.155574273535\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: nystroem_sampler\n",
" preprocessor:nystroem_sampler:gamma, Value: 0.00377270364971\n",
" preprocessor:nystroem_sampler:kernel, Value: rbf\n",
" preprocessor:nystroem_sampler:n_components, Value: 7416\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/kernel_approximation.py:463: UserWarning: n_components > n_samples. This is not possible.\n",
"n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n",
" warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:10,224:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 158. configuration. Duration: 1.546302; loss: 0.856557; status 1; additional run info: ;duration: 1.546302318572998;num_run:00158 \n",
"[INFO] [2016-08-16 07:56:10,237:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 159. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:10,240:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.525298023193\n",
" classifier:adaboost:max_depth, Value: 6\n",
" classifier:adaboost:n_estimators, Value: 168\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: ward\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 275\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:11,047:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 159. configuration. Duration: 0.718596; loss: 0.774590; status 1; additional run info: ;duration: 0.7185957431793213;num_run:00159 \n",
"[INFO] [2016-08-16 07:56:11,061:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 160. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:11,067:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.000247072542735\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: hinge\n",
" classifier:passive_aggressive:n_iter, Value: 285\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.000172684354075\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: manhattan\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 179\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:11,905:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 160. configuration. Duration: 0.753995; loss: 0.803279; status 1; additional run info: ;duration: 0.7539947032928467;num_run:00160 \n",
"[INFO] [2016-08-16 07:56:11,926:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 161. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:11,932:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0102608574057\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 9\n",
" classifier:xgradient_boosting:min_child_weight, Value: 14\n",
" classifier:xgradient_boosting:n_estimators, Value: 419\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.0299535003052\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 8\n",
" preprocessor:gem:precond, Value: 0.475489481424\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:56:12,945:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 161. configuration. Duration: 0.903133; loss: 0.860656; status 1; additional run info: ;duration: 0.9031331539154053;num_run:00161 \n",
"[INFO] [2016-08-16 07:56:12,960:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 162. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:12,963:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: proj_logit\n",
" classifier:proj_logit:max_epochs, Value: 5\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0395011824554\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 1.84792585156\n",
" preprocessor:kitchen_sinks:n_components, Value: 1659\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:14,237:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.381148 7: 0.377049 8: 0.377049 9: 0.381148 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.377049 14: 0.377049 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.385246 19: 0.381148 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.385246 28: 0.385246 29: 0.385246 30: 0.385246 31: 0.385246 32: 0.385246 33: 0.381148 34: 0.385246 35: 0.385246 36: 0.385246 37: 0.385246 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.377049 47: 0.377049 48: 0.381148 49: 0.381148\n",
"\tMembers: [21, 7, 14, 2, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 2, 3, 6, 2, 6, 7, 7, 3, 2, 1, 14, 7, 7, 7, 7, 7, 20, 47, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 3, 2, 6, 7]\n",
"\tWeights: [ 0. 0.02 0.1 0.06 0. 0. 0.1 0.62 0. 0. 0. 0. 0.\n",
" 0. 0.04 0. 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. ]\n",
"\tIdentifiers: (1, 19) (1, 21) (1, 31) (1, 39) (1, 40) (1, 63) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:56:14,260:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:56:14,272:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 6.184266 seconds\n",
"[INFO] [2016-08-16 07:56:14,277:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run16\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:56:16,367:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:16,775:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:17,622:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 162. configuration. Duration: 4.596748; loss: 0.877049; status 1; additional run info: ;duration: 4.596747636795044;num_run:00162 \n",
"[INFO] [2016-08-16 07:56:17,636:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 163. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:17,641:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0333938834612\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 1\n",
" classifier:xgradient_boosting:min_child_weight, Value: 13\n",
" classifier:xgradient_boosting:n_estimators, Value: 176\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.880926669034\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.194565211102\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:18,086:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 163. configuration. Duration: 0.379196; loss: 0.737705; status 1; additional run info: ;duration: 0.3791956901550293;num_run:00163 \n",
"[INFO] [2016-08-16 07:56:18,101:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 164. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:18,105:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0234978864833\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 12\n",
" classifier:xgradient_boosting:n_estimators, Value: 398\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.17884284812\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00018384356309\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 20755.6098304\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.00202042994347\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:19,179:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 164. configuration. Duration: 0.976017; loss: 0.684426; status 1; additional run info: ;duration: 0.9760172367095947;num_run:00164 \n",
"[INFO] [2016-08-16 07:56:20,147:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 138 training points for SMAC.\n",
"[INFO] [2016-08-16 07:56:22,912:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.381148 7: 0.377049 8: 0.377049 9: 0.381148 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.377049 14: 0.377049 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.385246 19: 0.381148 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.385246 28: 0.385246 29: 0.385246 30: 0.385246 31: 0.385246 32: 0.385246 33: 0.381148 34: 0.385246 35: 0.385246 36: 0.385246 37: 0.385246 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.377049 47: 0.377049 48: 0.381148 49: 0.381148\n",
"\tMembers: [21, 7, 14, 2, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 2, 3, 6, 2, 6, 7, 7, 3, 2, 1, 14, 7, 7, 7, 7, 7, 20, 47, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 3, 2, 6, 7]\n",
"\tWeights: [ 0. 0.02 0.1 0.06 0. 0. 0.1 0.62 0. 0. 0. 0. 0.\n",
" 0. 0.04 0. 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. ]\n",
"\tIdentifiers: (1, 19) (1, 21) (1, 31) (1, 39) (1, 40) (1, 63) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:56:22,938:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:56:22,949:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 6.624661 seconds\n",
"[INFO] [2016-08-16 07:56:22,954:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run16\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:56:24,988:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:25,142:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:29,942:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.381148 7: 0.377049 8: 0.377049 9: 0.381148 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.377049 14: 0.377049 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.385246 19: 0.381148 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.385246 28: 0.385246 29: 0.385246 30: 0.385246 31: 0.385246 32: 0.385246 33: 0.381148 34: 0.385246 35: 0.385246 36: 0.385246 37: 0.385246 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.377049 47: 0.377049 48: 0.381148 49: 0.381148\n",
"\tMembers: [20, 7, 13, 2, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 2, 3, 6, 2, 6, 7, 7, 3, 2, 1, 13, 7, 7, 7, 7, 7, 19, 46, 7, 7, 7, 6, 7, 7, 7, 7, 7, 7, 7, 3, 2, 6, 7]\n",
"\tWeights: [ 0. 0.02 0.1 0.06 0. 0. 0.1 0.62 0. 0. 0. 0. 0.\n",
" 0.04 0. 0. 0. 0. 0. 0.02 0.02 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. ]\n",
"\tIdentifiers: (1, 19) (1, 21) (1, 31) (1, 39) (1, 40) (1, 63) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:56:29,955:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:56:29,958:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 4.982001 seconds\n",
"[INFO] [2016-08-16 07:56:29,961:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:56:46,440:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 26.2874 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:56:46,446:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 165. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:46,448:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.464449375343\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:47,111:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 165. configuration. Duration: 0.609321; loss: 0.647541; status 1; additional run info: ;duration: 0.609320878982544;num_run:00165 \n",
"[INFO] [2016-08-16 07:56:47,119:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 166. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:47,121:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.082436911571\n",
" classifier:adaboost:max_depth, Value: 9\n",
" classifier:adaboost:n_estimators, Value: 283\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 36\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run16\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:56:48,063:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:48,130:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:48,972:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 166. configuration. Duration: 1.789099; loss: 0.807377; status 1; additional run info: ;duration: 1.7890987396240234;num_run:00166 \n",
"[INFO] [2016-08-16 07:56:48,979:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 167. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:48,981:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:49,624:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 167. configuration. Duration: 0.591942; loss: 0.647541; status 1; additional run info: ;duration: 0.591942310333252;num_run:00167 \n",
"[INFO] [2016-08-16 07:56:49,632:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 168. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:49,634:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 1.83605783289\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 1\n",
" classifier:decision_tree:min_samples_split, Value: 7\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.266109882375\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 12.2959125875\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:49,688:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 168. configuration. Duration: 0.019612; loss: 0.852459; status 1; additional run info: ;duration: 0.01961231231689453;num_run:00168 \n",
"[INFO] [2016-08-16 07:56:49,695:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 169. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:49,697:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:50,234:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.381148 7: 0.377049 8: 0.377049 9: 0.381148 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.377049 14: 0.377049 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.385246 19: 0.381148 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.372951 28: 0.385246 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.385246 44: 0.385246 45: 0.385246 46: 0.377049 47: 0.381148 48: 0.377049 49: 0.377049\n",
"\tMembers: [19, 6, 12, 1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 1, 2, 5, 1, 5, 6, 6, 2, 1, 4, 1, 38, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 45, 6, 6, 3, 19, 6, 6]\n",
"\tWeights: [ 0. 0.1 0.04 0.02 0.02 0.06 0.66 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0.04 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 21) (1, 31) (1, 34) (1, 37) (1, 39) (1, 40) (1, 63) (1, 79) (1, 123) (1, 149)\n",
"[INFO] [2016-08-16 07:56:50,242:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.377049\n",
"[INFO] [2016-08-16 07:56:50,243:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.184792 seconds\n",
"[INFO] [2016-08-16 07:56:50,247:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (160)!.\n",
"[INFO] [2016-08-16 07:56:50,249:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (160)!\n",
"[ERROR] [2016-08-16 07:56:50,260:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:50,331:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:50,357:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 169. configuration. Duration: 0.606712; loss: 0.647541; status 1; additional run info: ;duration: 0.6067123413085938;num_run:00169 \n",
"[INFO] [2016-08-16 07:56:50,365:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 170. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:50,367:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 64.4613115513\n",
" classifier:bernoulli_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 1.50706389678\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 11\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 16\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:50,646:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 170. configuration. Duration: 0.233033; loss: 0.889344; status 1; additional run info: ;duration: 0.2330327033996582;num_run:00170 \n",
"[INFO] [2016-08-16 07:56:50,654:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 171. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:50,657:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:51,375:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 171. configuration. Duration: 0.655082; loss: 0.647541; status 1; additional run info: ;duration: 0.6550819873809814;num_run:00171 \n",
"[INFO] [2016-08-16 07:56:51,382:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 172. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:51,384:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 0.0294373722547\n",
" classifier:bernoulli_nb:fit_prior, Value: True\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 10164.1133467\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.00247143999705\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:51,457:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 172. configuration. Duration: 0.032361; loss: 0.811475; status 1; additional run info: ;duration: 0.03236103057861328;num_run:00172 \n",
"[INFO] [2016-08-16 07:56:51,464:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 173. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:51,466:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:52,215:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 173. configuration. Duration: 0.689291; loss: 0.647541; status 1; additional run info: ;duration: 0.6892907619476318;num_run:00173 \n",
"[INFO] [2016-08-16 07:56:52,223:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 174. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:52,225:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 1.41662177149\n",
" classifier:bernoulli_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00275401501274\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:56:52,284:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 174. configuration. Duration: 0.018614; loss: 0.872951; status 1; additional run info: ;duration: 0.01861429214477539;num_run:00174 \n",
"[INFO] [2016-08-16 07:56:52,292:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 175. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:52,295:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:52,708:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.368852 3: 0.377049 4: 0.381148 5: 0.385246 6: 0.381148 7: 0.377049 8: 0.377049 9: 0.381148 10: 0.381148 11: 0.381148 12: 0.381148 13: 0.377049 14: 0.377049 15: 0.372951 16: 0.372951 17: 0.372951 18: 0.385246 19: 0.381148 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.372951 28: 0.385246 29: 0.381148 30: 0.381148 31: 0.381148 32: 0.381148 33: 0.381148 34: 0.381148 35: 0.381148 36: 0.381148 37: 0.381148 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.385246 44: 0.385246 45: 0.385246 46: 0.377049 47: 0.377049 48: 0.377049 49: 0.377049\n",
"\tMembers: [18, 5, 11, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 1, 2, 4, 1, 4, 5, 5, 2, 1, 3, 1, 37, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 44, 5, 5, 5, 5, 5, 5]\n",
"\tWeights: [ 0. 0.1 0.04 0.02 0.06 0.7 0. 0. 0. 0. 0. 0.02\n",
" 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 21) (1, 31) (1, 37) (1, 39) (1, 40) (1, 63) (1, 79) (1, 123) (1, 149)\n",
"[INFO] [2016-08-16 07:56:52,716:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.377049\n",
"[INFO] [2016-08-16 07:56:52,719:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.463970 seconds\n",
"[INFO] [2016-08-16 07:56:52,723:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (163)!.\n",
"[INFO] [2016-08-16 07:56:52,726:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (163)!\n",
"[ERROR] [2016-08-16 07:56:52,739:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:52,814:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:53,014:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 175. configuration. Duration: 0.659380; loss: 0.647541; status 1; additional run info: ;duration: 0.6593797206878662;num_run:00175 \n",
"[INFO] [2016-08-16 07:56:53,025:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 176. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:53,028:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.0113865755932\n",
" classifier:sgd:average, Value: True\n",
" classifier:sgd:eta0, Value: 0.059741833941\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: constant\n",
" classifier:sgd:loss, Value: perceptron\n",
" classifier:sgd:n_iter, Value: 195\n",
" classifier:sgd:penalty, Value: l1\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.498479508115\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:n_components, Value: 620\n",
" preprocessor:fast_ica:whiten, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"n_components is too large: it will be set to 9\n",
"[INFO] [2016-08-16 07:56:53,461:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 176. configuration. Duration: 0.393487; loss: 1.159836; status 1; additional run info: ;duration: 0.39348673820495605;num_run:00176 \n",
"[INFO] [2016-08-16 07:56:53,468:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 177. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:53,470:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0138289887612\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:54,214:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 177. configuration. Duration: 0.686153; loss: 0.647541; status 1; additional run info: ;duration: 0.6861526966094971;num_run:00177 \n",
"[INFO] [2016-08-16 07:56:54,224:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 178. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:54,227:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.227552709207\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 10\n",
" classifier:gradient_boosting:max_features, Value: 3.33583602153\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 11\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 136\n",
" classifier:gradient_boosting:subsample, Value: 0.221801564857\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.000120541543842\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 116, in _ica_par\n",
" warnings.warn('FastICA did not converge. Consider increasing '\n",
"UserWarning: FastICA did not converge. Consider increasing tolerance or the maximum number of iterations.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:55,285:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.368852 15: 0.377049 16: 0.377049 17: 0.377049 18: 0.377049 19: 0.377049 20: 0.377049 21: 0.377049 22: 0.377049 23: 0.377049 24: 0.381148 25: 0.381148 26: 0.381148 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.381148 33: 0.385246 34: 0.385246 35: 0.381148 36: 0.381148 37: 0.377049 38: 0.381148 39: 0.381148 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.393443 45: 0.393443 46: 0.393443 47: 0.389344 48: 0.389344 49: 0.389344\n",
"\tMembers: [16, 5, 5, 16, 5, 16, 41, 10, 15, 0, 13, 5, 5, 1, 2, 36, 1, 5, 5, 5, 5, 5, 5, 5, 41, 5, 5, 5, 5, 5, 5, 3, 15, 5, 5, 5, 5, 2, 15, 5, 5, 5, 13, 1, 2, 5, 5, 5, 5, 5]\n",
"\tWeights: [ 0.02 0.06 0.06 0.02 0. 0.6 0. 0. 0. 0. 0.02 0. 0.\n",
" 0.04 0. 0.06 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.\n",
" 0. 0. 0.04 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 31) (1, 37) (1, 40) (1, 65) (1, 69) (1, 73) (1, 79) (1, 126) (1, 149)\n",
"[INFO] [2016-08-16 07:56:55,292:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.389344\n",
"[INFO] [2016-08-16 07:56:55,294:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.560404 seconds\n",
"[INFO] [2016-08-16 07:56:55,297:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (169)!.\n",
"[INFO] [2016-08-16 07:56:55,299:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (169)!\n",
"[ERROR] [2016-08-16 07:56:55,311:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:56:55,384:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:55,458:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 178. configuration. Duration: 1.222356; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:56:55,467:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 179. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:55,469:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[ERROR] [2016-08-16 07:56:55,496:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:56:56,195:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 179. configuration. Duration: 0.669811; loss: 0.647541; status 1; additional run info: ;duration: 0.6698112487792969;num_run:00179 \n",
"[INFO] [2016-08-16 07:56:56,204:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 180. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:56,207:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: gini\n",
" classifier:decision_tree:max_depth, Value: 0.228822287652\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 17\n",
" classifier:decision_tree:min_samples_split, Value: 2\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00208599333996\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:n_components, Value: 1219\n",
" preprocessor:fast_ica:whiten, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"n_components is too large: it will be set to 9\n",
"[INFO] [2016-08-16 07:56:56,336:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 180. configuration. Duration: 0.090675; loss: 0.770492; status 1; additional run info: ;duration: 0.09067535400390625;num_run:00180 \n",
"[INFO] [2016-08-16 07:56:56,345:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 181. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:56,347:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.0404096759333\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 465\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:56:57,223:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 181. configuration. Duration: 0.799992; loss: 0.803279; status 1; additional run info: ;duration: 0.7999916076660156;num_run:00181 \n",
"[INFO] [2016-08-16 07:56:57,232:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 182. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:57,235:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 129\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 0.0858733688445\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:56:57,303:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 182. configuration. Duration: 0.026963; loss: 0.725410; status 1; additional run info: ;duration: 0.0269625186920166;num_run:00182 \n",
"[INFO] [2016-08-16 07:56:57,313:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 183. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:57,316:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 2.44638253826\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 11\n",
" classifier:random_forest:min_samples_split, Value: 18\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00683265751935\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:56:57,618:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 183. configuration. Duration: 0.253443; loss: 0.680328; status 1; additional run info: ;duration: 0.25344324111938477;num_run:00183 \n",
"[INFO] [2016-08-16 07:56:57,626:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 184. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:57,628:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0111264470674\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 2.6722951724\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 11\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 9\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:57,802:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.364754 15: 0.377049 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.368852 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.372951 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [15, 4, 4, 15, 4, 15, 39, 9, 14, 0, 12, 4, 4, 1, 3, 10, 4, 2, 4, 4, 4, 4, 1, 0, 10, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 3, 2, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4]\n",
"\tWeights: [ 0.04 0.06 0.06 0.08 0.58 0. 0. 0. 0. 0.02 0.04 0.\n",
" 0.02 0. 0.02 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 37) (1, 39) (1, 40) (1, 65) (1, 67) (1, 69) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:56:57,811:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:56:57,813:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.507314 seconds\n",
"[INFO] [2016-08-16 07:56:57,816:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (172)!.\n",
"[INFO] [2016-08-16 07:56:57,818:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (172)!\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:56:57,830:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:56:57,905:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 184. configuration. Duration: 0.229496; loss: 0.713115; status 1; additional run info: ;duration: 0.22949624061584473;num_run:00184 \n",
"[ERROR] [2016-08-16 07:56:57,907:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:56:57,915:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 185. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:57,917:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.285378581226\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 12\n",
" classifier:xgradient_boosting:n_estimators, Value: 229\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.635219713353\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.000544544963901\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[ERROR] [2016-08-16 07:56:58,024:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:56:58,595:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 185. configuration. Duration: 0.638471; loss: 0.733607; status 1; additional run info: ;duration: 0.6384713649749756;num_run:00185 \n",
"[INFO] [2016-08-16 07:56:58,603:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 186. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:58,605:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.190921236574\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:n_components, Value: 844\n",
" preprocessor:fast_ica:whiten, Value: True\n",
" rescaling:__choice__, Value: none\n",
"\n",
"n_components is too large: it will be set to 9\n",
"[INFO] [2016-08-16 07:56:58,732:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 186. configuration. Duration: 0.048867; loss: 0.745902; status 1; additional run info: ;duration: 0.04886674880981445;num_run:00186 \n",
"[INFO] [2016-08-16 07:56:58,741:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 187. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:58,743:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.072494349798\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 7\n",
" classifier:xgradient_boosting:n_estimators, Value: 81\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.0872885268293\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 13\n",
" preprocessor:gem:precond, Value: 0.247644101237\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:56:59,082:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 187. configuration. Duration: 0.299846; loss: 0.713115; status 1; additional run info: ;duration: 0.29984617233276367;num_run:00187 \n",
"[INFO] [2016-08-16 07:56:59,090:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 188. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:56:59,092:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:coef0, Value: 0.631472001441\n",
" preprocessor:kernel_pca:kernel, Value: sigmoid\n",
" preprocessor:kernel_pca:n_components, Value: 279\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 148, in _pre_transform\n",
" .transform(Xt)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/kernel_pca.py\", line 53, in transform\n",
" raise ValueError(\"KernelPCA removed all features!\")\n",
"ValueError: KernelPCA removed all features!\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:00,361:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 188. configuration. Duration: 1.260324; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:57:00,370:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 189. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:00,372:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.433927756858\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 11\n",
" classifier:xgradient_boosting:n_estimators, Value: 310\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.144428241987\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:00,718:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 189. configuration. Duration: 0.300743; loss: 0.704918; status 1; additional run info: ;duration: 0.30074334144592285;num_run:00189 \n",
"[INFO] [2016-08-16 07:57:00,728:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 190. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:00,730:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 5.55703591689\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000277356064305\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 163\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:00,808:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.364754 15: 0.377049 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.368852 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.372951 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [13, 4, 4, 13, 4, 13, 37, 7, 12, 0, 10, 4, 4, 1, 3, 8, 4, 2, 4, 4, 4, 4, 1, 0, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 3, 2, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4]\n",
"\tWeights: [ 0.04 0.06 0.06 0.08 0.58 0. 0. 0.02 0.04 0. 0.02 0.\n",
" 0.02 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 37) (1, 39) (1, 40) (1, 65) (1, 67) (1, 69) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:00,816:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:57:00,818:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.994050 seconds\n",
"[INFO] [2016-08-16 07:57:00,838:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:01,214:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 190. configuration. Duration: 0.406752; loss: 0.877049; status 1; additional run info: ;duration: 0.4067518711090088;num_run:00190 \n",
"[INFO] [2016-08-16 07:57:01,223:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 191. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:01,225:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.0228337656859\n",
" classifier:adaboost:max_depth, Value: 6\n",
" classifier:adaboost:n_estimators, Value: 218\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00053752512301\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 18\n",
" preprocessor:gem:precond, Value: 0.345685570452\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run20\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:02,858:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:02,959:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:03,131:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:04,999:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 191. configuration. Duration: 3.706326; loss: 0.856557; status 1; additional run info: ;duration: 3.7063255310058594;num_run:00191 \n",
"[INFO] [2016-08-16 07:57:05,009:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 192. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:05,015:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 0.75563116613\n",
" classifier:extra_trees:min_samples_leaf, Value: 9\n",
" classifier:extra_trees:min_samples_split, Value: 13\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.492722090532\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:57:05,327:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 192. configuration. Duration: 0.255367; loss: 0.725410; status 1; additional run info: ;duration: 0.25536656379699707;num_run:00192 \n",
"[INFO] [2016-08-16 07:57:05,338:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 193. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:05,342:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 1.22709021285\n",
" classifier:adaboost:max_depth, Value: 8\n",
" classifier:adaboost:n_estimators, Value: 234\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0446458489034\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 93.9542819586\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:05,796:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.364754 15: 0.377049 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.368852 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.372951 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [13, 4, 4, 13, 4, 13, 37, 7, 12, 0, 10, 4, 4, 1, 3, 8, 4, 2, 4, 4, 4, 4, 1, 0, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 3, 2, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4]\n",
"\tWeights: [ 0.04 0.06 0.06 0.08 0.58 0. 0. 0.02 0.04 0. 0.02 0.\n",
" 0.02 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 37) (1, 39) (1, 40) (1, 65) (1, 67) (1, 69) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:05,804:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:57:05,807:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.955733 seconds\n",
"[INFO] [2016-08-16 07:57:05,809:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:57:06,262:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 193. configuration. Duration: 0.847360; loss: 0.770492; status 1; additional run info: ;duration: 0.8473598957061768;num_run:00193 \n",
"[INFO] [2016-08-16 07:57:06,272:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 194. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:06,273:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME.R\n",
" classifier:adaboost:learning_rate, Value: 0.411057351669\n",
" classifier:adaboost:max_depth, Value: 7\n",
" classifier:adaboost:n_estimators, Value: 462\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: manhattan\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 394\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run20\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:07,836:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:07,911:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:57:07,927:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 194. configuration. Duration: 1.566479; loss: 0.774590; status 1; additional run info: ;duration: 1.5664787292480469;num_run:00194 \n",
"[INFO] [2016-08-16 07:57:07,936:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 195. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:07,938:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 1.41778154608\n",
" classifier:adaboost:max_depth, Value: 8\n",
" classifier:adaboost:n_estimators, Value: 351\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00305515385341\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 2.57932465698\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 2\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 5\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[ERROR] [2016-08-16 07:57:08,032:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:09,196:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 195. configuration. Duration: 1.166501; loss: 0.729508; status 1; additional run info: ;duration: 1.1665012836456299;num_run:00195 \n",
"[INFO] [2016-08-16 07:57:09,204:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 196. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:09,206:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: k_nearest_neighbors\n",
" classifier:k_nearest_neighbors:n_neighbors, Value: 5\n",
" classifier:k_nearest_neighbors:p, Value: 1\n",
" classifier:k_nearest_neighbors:weights, Value: distance\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0210628354718\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 139.430670113\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.00708203034599\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:09,281:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 196. configuration. Duration: 0.037103; loss: 0.848361; status 1; additional run info: ;duration: 0.03710341453552246;num_run:00196 \n",
"[INFO] [2016-08-16 07:57:09,289:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 197. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:09,291:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: k_nearest_neighbors\n",
" classifier:k_nearest_neighbors:n_neighbors, Value: 1\n",
" classifier:k_nearest_neighbors:p, Value: 2\n",
" classifier:k_nearest_neighbors:weights, Value: distance\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: gem\n",
" preprocessor:gem:N, Value: 19\n",
" preprocessor:gem:precond, Value: 0.111302568621\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:57:09,420:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 197. configuration. Duration: 0.080479; loss: 0.823770; status 1; additional run info: ;duration: 0.08047914505004883;num_run:00197 \n",
"[INFO] [2016-08-16 07:57:09,429:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 198. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:09,432:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.377724405606\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 8\n",
" classifier:xgradient_boosting:min_child_weight, Value: 5\n",
" classifier:xgradient_boosting:n_estimators, Value: 271\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.637377828804\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 9\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 3\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 16\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 70\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:10,345:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.364754 15: 0.377049 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.368852 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.372951 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [13, 4, 4, 13, 4, 13, 37, 7, 12, 0, 10, 4, 4, 1, 3, 8, 4, 2, 4, 4, 4, 4, 1, 0, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 3, 2, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4]\n",
"\tWeights: [ 0.04 0.06 0.06 0.08 0.58 0. 0. 0.02 0.04 0. 0.02 0.\n",
" 0.02 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 37) (1, 39) (1, 40) (1, 65) (1, 67) (1, 69) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:10,354:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:57:10,357:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.526227 seconds\n",
"[INFO] [2016-08-16 07:57:10,359:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run20\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:12,378:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:12,448:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:12,556:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 60, in fit_predict_and_loss\n",
" return self.predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 115, in predict_and_loss\n",
" Y_optimization_pred, Y_valid_pred, Y_test_pred = self._predict()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 98, in _predict\n",
" self.Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/abstract_evaluator.py\", line 266, in _predict_proba\n",
" Y_pred = model.predict_proba(X, batch_size=1000)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 120, in predict_proba\n",
" target = self.predict_proba(X[0:2].copy())\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 112, in predict_proba\n",
" return self.pipeline_.steps[-1][-1].predict_proba(Xt)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/classification/xgradient_boosting.py\", line 141, in predict_proba\n",
" return self.estimator.predict_proba(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/xgboost/sklearn.py\", line 477, in predict_proba\n",
" ntree_limit=ntree_limit)\n",
" File \"/opt/conda/lib/python3.5/site-packages/xgboost/core.py\", line 939, in predict\n",
" self._validate_features(data)\n",
" File \"/opt/conda/lib/python3.5/site-packages/xgboost/core.py\", line 1179, in _validate_features\n",
" data.feature_names))\n",
"ValueError: feature_names mismatch: ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41', 'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54', 'f55', 'f56', 'f57', 'f58', 'f59', 'f60', 'f61', 'f62', 'f63', 'f64', 'f65', 'f66', 'f67', 'f68', 'f69', 'f70', 'f71', 'f72', 'f73', 'f74', 'f75', 'f76', 'f77', 'f78', 'f79', 'f80', 'f81', 'f82', 'f83', 'f84', 'f85', 'f86', 'f87', 'f88', 'f89', 'f90', 'f91', 'f92', 'f93', 'f94', 'f95', 'f96', 'f97', 'f98', 'f99', 'f100', 'f101', 'f102', 'f103', 'f104', 'f105', 'f106', 'f107', 'f108', 'f109', 'f110', 'f111', 'f112', 'f113', 'f114', 'f115', 'f116', 'f117', 'f118', 'f119', 'f120', 'f121', 'f122', 'f123', 'f124', 'f125', 'f126', 'f127', 'f128', 'f129', 'f130', 'f131', 'f132', 'f133', 'f134', 'f135', 'f136', 'f137', 'f138', 'f139', 'f140', 'f141', 'f142', 'f143', 'f144', 'f145', 'f146', 'f147', 'f148', 'f149', 'f150', 'f151', 'f152', 'f153', 'f154', 'f155', 'f156', 'f157', 'f158', 'f159', 'f160', 'f161', 'f162', 'f163', 'f164', 'f165', 'f166', 'f167', 'f168', 'f169', 'f170', 'f171', 'f172', 'f173', 'f174', 'f175', 'f176', 'f177', 'f178', 'f179', 'f180', 'f181', 'f182', 'f183', 'f184', 'f185', 'f186', 'f187', 'f188', 'f189', 'f190', 'f191', 'f192', 'f193', 'f194', 'f195', 'f196', 'f197', 'f198', 'f199', 'f200', 'f201', 'f202', 'f203', 'f204', 'f205', 'f206', 'f207', 'f208', 'f209', 'f210', 'f211', 'f212', 'f213', 'f214', 'f215', 'f216', 'f217', 'f218', 'f219', 'f220', 'f221', 'f222', 'f223', 'f224', 'f225', 'f226', 'f227', 'f228', 'f229', 'f230', 'f231', 'f232', 'f233', 'f234', 'f235', 'f236', 'f237', 'f238', 'f239', 'f240', 'f241', 'f242', 'f243', 'f244', 'f245', 'f246', 'f247', 'f248', 'f249', 'f250', 'f251', 'f252', 'f253', 'f254', 'f255', 'f256', 'f257', 'f258', 
'f259', 'f260', 'f261', 'f262', 'f263', 'f264', 'f265', 'f266', 'f267', 'f268', 'f269', 'f270', 'f271', 'f272', 'f273', 'f274', 'f275', 'f276', 'f277', 'f278', 'f279', 'f280', 'f281', 'f282', 'f283', 'f284', 'f285', 'f286', 'f287', 'f288', 'f289', 'f290', 'f291', 'f292', 'f293', 'f294', 'f295', 'f296', 'f297', 'f298', 'f299', 'f300', 'f301', 'f302', 'f303', 'f304', 'f305', 'f306', 'f307', 'f308', 'f309', 'f310', 'f311', 'f312', 'f313', 'f314', 'f315', 'f316', 'f317', 'f318', 'f319', 'f320', 'f321', 'f322', 'f323', 'f324', 'f325', 'f326', 'f327', 'f328', 'f329', 'f330', 'f331', 'f332', 'f333', 'f334', 'f335', 'f336', 'f337', 'f338', 'f339', 'f340', 'f341', 'f342', 'f343', 'f344', 'f345', 'f346', 'f347', 'f348', 'f349', 'f350', 'f351', 'f352', 'f353', 'f354', 'f355', 'f356', 'f357', 'f358', 'f359', 'f360', 'f361', 'f362', 'f363', 'f364', 'f365', 'f366', 'f367', 'f368', 'f369', 'f370', 'f371', 'f372', 'f373', 'f374', 'f375', 'f376', 'f377', 'f378', 'f379', 'f380', 'f381', 'f382', 'f383', 'f384', 'f385', 'f386', 'f387', 'f388', 'f389', 'f390', 'f391', 'f392', 'f393', 'f394', 'f395', 'f396', 'f397', 'f398', 'f399', 'f400', 'f401', 'f402', 'f403', 'f404', 'f405', 'f406', 'f407', 'f408', 'f409', 'f410', 'f411', 'f412', 'f413', 'f414', 'f415', 'f416', 'f417', 'f418', 'f419', 'f420', 'f421', 'f422', 'f423', 'f424', 'f425', 'f426', 'f427', 'f428', 'f429', 'f430', 'f431', 'f432', 'f433', 'f434', 'f435', 'f436', 'f437', 'f438', 'f439', 'f440', 'f441', 'f442', 'f443', 'f444', 'f445', 'f446', 'f447', 'f448', 'f449', 'f450', 'f451', 'f452', 'f453', 'f454', 'f455', 'f456', 'f457', 'f458', 'f459', 'f460', 'f461', 'f462', 'f463', 'f464', 'f465', 'f466', 'f467', 'f468', 'f469', 'f470', 'f471', 'f472', 'f473', 'f474', 'f475', 'f476', 'f477', 'f478', 'f479', 'f480', 'f481', 'f482', 'f483', 'f484', 'f485', 'f486', 'f487', 'f488', 'f489', 'f490', 'f491', 'f492', 'f493', 'f494', 'f495', 'f496', 'f497', 'f498', 'f499', 'f500', 'f501', 'f502', 'f503', 'f504', 'f505', 'f506', 'f507', 'f508', 
'f509', 'f510', 'f511', 'f512', 'f513', 'f514', 'f515', 'f516', 'f517', 'f518', 'f519', 'f520', 'f521', 'f522', 'f523', 'f524', 'f525', 'f526', 'f527', 'f528', 'f529', 'f530', 'f531', 'f532', 'f533', 'f534', 'f535', 'f536', 'f537', 'f538', 'f539', 'f540', 'f541', 'f542', 'f543', 'f544', 'f545', 'f546', 'f547', 'f548', 'f549', 'f550', 'f551', 'f552', 'f553', 'f554', 'f555', 'f556', 'f557', 'f558', 'f559', 'f560', 'f561', 'f562', 'f563', 'f564', 'f565', 'f566', 'f567', 'f568', 'f569', 'f570', 'f571', 'f572', 'f573', 'f574', 'f575', 'f576', 'f577', 'f578', 'f579', 'f580', 'f581', 'f582', 'f583', 'f584', 'f585', 'f586', 'f587', 'f588', 'f589', 'f590', 'f591', 'f592', 'f593', 'f594', 'f595', 'f596', 'f597', 'f598', 'f599', 'f600', 'f601', 'f602', 'f603', 'f604', 'f605', 'f606', 'f607', 'f608', 'f609', 'f610', 'f611', 'f612', 'f613', 'f614', 'f615', 'f616', 'f617', 'f618', 'f619', 'f620', 'f621', 'f622', 'f623', 'f624', 'f625', 'f626', 'f627', 'f628', 'f629', 'f630', 'f631', 'f632', 'f633', 'f634', 'f635', 'f636', 'f637', 'f638', 'f639', 'f640', 'f641', 'f642', 'f643', 'f644', 'f645', 'f646', 'f647', 'f648', 'f649', 'f650', 'f651', 'f652', 'f653', 'f654', 'f655', 'f656', 'f657', 'f658', 'f659', 'f660', 'f661', 'f662', 'f663', 'f664', 'f665', 'f666', 'f667', 'f668', 'f669', 'f670', 'f671', 'f672', 'f673', 'f674', 'f675', 'f676', 'f677', 'f678', 'f679', 'f680', 'f681', 'f682', 'f683', 'f684', 'f685', 'f686', 'f687', 'f688', 'f689', 'f690', 'f691', 'f692', 'f693', 'f694', 'f695', 'f696', 'f697', 'f698', 'f699', 'f700', 'f701', 'f702', 'f703', 'f704', 'f705', 'f706', 'f707', 'f708', 'f709', 'f710', 'f711', 'f712', 'f713', 'f714', 'f715', 'f716', 'f717', 'f718', 'f719', 'f720', 'f721', 'f722', 'f723', 'f724', 'f725', 'f726', 'f727', 'f728', 'f729', 'f730', 'f731', 'f732', 'f733', 'f734', 'f735', 'f736', 'f737', 'f738', 'f739', 'f740', 'f741', 'f742', 'f743', 'f744', 'f745', 'f746', 'f747', 'f748', 'f749', 'f750', 'f751', 'f752', 'f753', 'f754', 'f755', 'f756', 'f757', 'f758', 
'f759', 'f760', 'f761', 'f762', 'f763', 'f764', 'f765', 'f766', 'f767', 'f768', 'f769', 'f770', 'f771', 'f772', 'f773', 'f774', 'f775', 'f776', 'f777', 'f778', 'f779', 'f780', 'f781', 'f782', 'f783', 'f784', 'f785', 'f786', 'f787', 'f788', 'f789', 'f790', 'f791', 'f792', 'f793', 'f794', 'f795', 'f796', 'f797', 'f798', 'f799', 'f800', 'f801', 'f802', 'f803', 'f804', 'f805', 'f806', 'f807', 'f808', 'f809', 'f810', 'f811', 'f812', 'f813', 'f814', 'f815', 'f816', 'f817', 'f818', 'f819', 'f820', 'f821', 'f822', 'f823', 'f824', 'f825', 'f826', 'f827', 'f828', 'f829', 'f830', 'f831', 'f832', 'f833', 'f834', 'f835', 'f836', 'f837', 'f838', 'f839', 'f840', 'f841', 'f842', 'f843', 'f844', 'f845', 'f846', 'f847', 'f848', 'f849', 'f850', 'f851', 'f852', 'f853', 'f854', 'f855', 'f856', 'f857', 'f858', 'f859', 'f860', 'f861', 'f862', 'f863', 'f864', 'f865', 'f866', 'f867', 'f868', 'f869', 'f870', 'f871', 'f872', 'f873', 'f874', 'f875', 'f876', 'f877', 'f878', 'f879', 'f880', 'f881', 'f882', 'f883', 'f884', 'f885', 'f886', 'f887', 'f888', 'f889', 'f890', 'f891', 'f892', 'f893', 'f894', 'f895', 'f896', 'f897', 'f898', 'f899', 'f900', 'f901', 'f902', 'f903', 'f904', 'f905', 'f906', 'f907', 'f908', 'f909', 'f910', 'f911', 'f912', 'f913', 'f914', 'f915', 'f916', 'f917', 'f918', 'f919', 'f920', 'f921', 'f922', 'f923', 'f924', 'f925', 'f926', 'f927', 'f928', 'f929', 'f930', 'f931', 'f932', 'f933', 'f934', 'f935', 'f936', 'f937', 'f938', 'f939', 'f940', 'f941', 'f942', 'f943', 'f944', 'f945', 'f946', 'f947', 'f948', 'f949', 'f950', 'f951', 'f952', 'f953', 'f954', 'f955', 'f956', 'f957', 'f958', 'f959', 'f960', 'f961', 'f962', 'f963', 'f964', 'f965', 'f966', 'f967', 'f968', 'f969', 'f970', 'f971', 'f972', 'f973', 'f974', 'f975', 'f976', 'f977', 'f978', 'f979', 'f980', 'f981', 'f982', 'f983', 'f984', 'f985', 'f986', 'f987', 'f988', 'f989', 'f990', 'f991', 'f992', 'f993', 'f994', 'f995', 'f996', 'f997', 'f998', 'f999', 'f1000', 'f1001', 'f1002', 'f1003', 'f1004', 'f1005', 'f1006', 'f1007', 
'f1008', 'f1009', 'f1010', 'f1011', 'f1012', 'f1013', 'f1014', 'f1015', 'f1016', 'f1017', 'f1018', 'f1019', 'f1020', 'f1021', 'f1022', 'f1023', 'f1024', 'f1025', 'f1026', 'f1027', 'f1028', 'f1029', 'f1030', 'f1031', 'f1032', 'f1033', 'f1034', 'f1035', 'f1036', 'f1037', 'f1038', 'f1039', 'f1040', 'f1041', 'f1042', 'f1043', 'f1044', 'f1045', 'f1046', 'f1047', 'f1048', 'f1049', 'f1050', 'f1051', 'f1052', 'f1053', 'f1054', 'f1055', 'f1056', 'f1057', 'f1058', 'f1059', 'f1060', 'f1061', 'f1062', 'f1063', 'f1064', 'f1065', 'f1066', 'f1067', 'f1068', 'f1069', 'f1070', 'f1071', 'f1072', 'f1073', 'f1074', 'f1075', 'f1076', 'f1077', 'f1078', 'f1079', 'f1080', 'f1081', 'f1082', 'f1083', 'f1084', 'f1085', 'f1086', 'f1087', 'f1088', 'f1089', 'f1090', 'f1091', 'f1092', 'f1093', 'f1094', 'f1095', 'f1096', 'f1097', 'f1098', 'f1099', 'f1100', 'f1101', 'f1102', 'f1103', 'f1104', 'f1105', 'f1106', 'f1107', 'f1108', 'f1109', 'f1110', 'f1111', 'f1112', 'f1113', 'f1114', 'f1115', 'f1116', 'f1117', 'f1118', 'f1119', 'f1120', 'f1121', 'f1122', 'f1123', 'f1124', 'f1125', 'f1126', 'f1127', 'f1128', 'f1129', 'f1130', 'f1131', 'f1132', 'f1133', 'f1134', 'f1135', 'f1136', 'f1137', 'f1138', 'f1139', 'f1140', 'f1141', 'f1142', 'f1143', 'f1144', 'f1145', 'f1146', 'f1147', 'f1148', 'f1149', 'f1150', 'f1151', 'f1152', 'f1153', 'f1154', 'f1155', 'f1156', 'f1157', 'f1158', 'f1159', 'f1160', 'f1161', 'f1162', 'f1163', 'f1164', 'f1165', 'f1166', 'f1167', 'f1168', 'f1169', 'f1170', 'f1171', 'f1172', 'f1173', 'f1174', 'f1175', 'f1176', 'f1177', 'f1178', 'f1179', 'f1180', 'f1181', 'f1182', 'f1183', 'f1184', 'f1185', 'f1186', 'f1187', 'f1188', 'f1189', 'f1190', 'f1191', 'f1192', 'f1193', 'f1194', 'f1195', 'f1196', 'f1197', 'f1198', 'f1199', 'f1200', 'f1201', 'f1202', 'f1203', 'f1204', 'f1205', 'f1206', 'f1207', 'f1208', 'f1209', 'f1210', 'f1211', 'f1212', 'f1213', 'f1214', 'f1215', 'f1216', 'f1217', 'f1218', 'f1219', 'f1220', 'f1221', 'f1222', 'f1223', 'f1224', 'f1225', 'f1226', 'f1227', 'f1228', 'f1229', 
'f1230', 'f1231', 'f1232', 'f1233', 'f1234', 'f1235', 'f1236', 'f1237', 'f1238', 'f1239', 'f1240', 'f1241', 'f1242', 'f1243', 'f1244', 'f1245', 'f1246', 'f1247', 'f1248', 'f1249', 'f1250', 'f1251', 'f1252', 'f1253', 'f1254', 'f1255', 'f1256', 'f1257', 'f1258', 'f1259', 'f1260', 'f1261', 'f1262', 'f1263', 'f1264', 'f1265', 'f1266', 'f1267', 'f1268', 'f1269', 'f1270', 'f1271', 'f1272', 'f1273', 'f1274', 'f1275', 'f1276', 'f1277', 'f1278', 'f1279', 'f1280', 'f1281', 'f1282', 'f1283', 'f1284', 'f1285', 'f1286', 'f1287', 'f1288', 'f1289', 'f1290', 'f1291', 'f1292', 'f1293', 'f1294', 'f1295', 'f1296', 'f1297', 'f1298', 'f1299', 'f1300', 'f1301', 'f1302', 'f1303', 'f1304', 'f1305', 'f1306', 'f1307', 'f1308', 'f1309', 'f1310', 'f1311', 'f1312', 'f1313', 'f1314', 'f1315', 'f1316', 'f1317', 'f1318', 'f1319', 'f1320', 'f1321', 'f1322', 'f1323', 'f1324', 'f1325', 'f1326', 'f1327', 'f1328', 'f1329', 'f1330', 'f1331', 'f1332', 'f1333', 'f1334', 'f1335', 'f1336', 'f1337', 'f1338', 'f1339', 'f1340', 'f1341', 'f1342', 'f1343', 'f1344', 'f1345', 'f1346', 'f1347', 'f1348', 'f1349', 'f1350', 'f1351', 'f1352', 'f1353', 'f1354', 'f1355', 'f1356', 'f1357', 'f1358', 'f1359', 'f1360', 'f1361', 'f1362', 'f1363', 'f1364', 'f1365', 'f1366', 'f1367', 'f1368', 'f1369', 'f1370', 'f1371', 'f1372', 'f1373', 'f1374', 'f1375', 'f1376', 'f1377', 'f1378', 'f1379', 'f1380', 'f1381', 'f1382', 'f1383', 'f1384', 'f1385', 'f1386', 'f1387', 'f1388', 'f1389', 'f1390', 'f1391', 'f1392', 'f1393', 'f1394', 'f1395', 'f1396', 'f1397', 'f1398', 'f1399', 'f1400', 'f1401', 'f1402', 'f1403', 'f1404', 'f1405', 'f1406', 'f1407', 'f1408', 'f1409', 'f1410', 'f1411', 'f1412', 'f1413', 'f1414', 'f1415', 'f1416', 'f1417', 'f1418', 'f1419', 'f1420', 'f1421', 'f1422', 'f1423', 'f1424', 'f1425', 'f1426', 'f1427', 'f1428', 'f1429', 'f1430', 'f1431', 'f1432', 'f1433', 'f1434', 'f1435', 'f1436', 'f1437', 'f1438', 'f1439', 'f1440', 'f1441', 'f1442', 'f1443', 'f1444', 'f1445', 'f1446', 'f1447', 'f1448', 'f1449', 'f1450', 'f1451', 
'f1452', 'f1453', 'f1454', 'f1455', 'f1456', 'f1457', 'f1458', 'f1459', 'f1460', 'f1461', 'f1462', 'f1463', 'f1464', 'f1465', 'f1466', 'f1467', 'f1468', 'f1469', 'f1470', 'f1471', 'f1472', 'f1473', 'f1474', 'f1475', 'f1476', 'f1477', 'f1478', 'f1479', 'f1480', 'f1481', 'f1482', 'f1483', 'f1484', 'f1485', 'f1486', 'f1487', 'f1488', 'f1489', 'f1490', 'f1491', 'f1492', 'f1493', 'f1494', 'f1495', 'f1496', 'f1497', 'f1498', 'f1499', 'f1500', 'f1501', 'f1502', 'f1503', 'f1504', 'f1505', 'f1506', 'f1507', 'f1508', 'f1509', 'f1510', 'f1511', 'f1512', 'f1513', 'f1514', 'f1515', 'f1516', 'f1517', 'f1518', 'f1519', 'f1520', 'f1521', 'f1522', 'f1523', 'f1524', 'f1525', 'f1526', 'f1527', 'f1528', 'f1529', 'f1530', 'f1531', 'f1532', 'f1533', 'f1534', 'f1535', 'f1536', 'f1537', 'f1538', 'f1539', 'f1540', 'f1541', 'f1542', 'f1543', 'f1544', 'f1545', 'f1546', 'f1547', 'f1548', 'f1549', 'f1550', 'f1551', 'f1552', 'f1553', 'f1554', 'f1555', 'f1556', 'f1557', 'f1558', 'f1559', 'f1560', 'f1561', 'f1562', 'f1563', 'f1564', 'f1565', 'f1566', 'f1567', 'f1568', 'f1569', 'f1570', 'f1571', 'f1572', 'f1573', 'f1574', 'f1575', 'f1576', 'f1577', 'f1578', 'f1579', 'f1580', 'f1581', 'f1582', 'f1583', 'f1584', 'f1585', 'f1586', 'f1587', 'f1588', 'f1589', 'f1590', 'f1591', 'f1592', 'f1593', 'f1594', 'f1595', 'f1596', 'f1597', 'f1598', 'f1599', 'f1600', 'f1601', 'f1602', 'f1603', 'f1604', 'f1605', 'f1606', 'f1607', 'f1608', 'f1609', 'f1610', 'f1611', 'f1612', 'f1613', 'f1614', 'f1615', 'f1616', 'f1617', 'f1618', 'f1619', 'f1620', 'f1621', 'f1622', 'f1623', 'f1624', 'f1625', 'f1626', 'f1627', 'f1628', 'f1629', 'f1630', 'f1631', 'f1632', 'f1633', 'f1634', 'f1635', 'f1636', 'f1637', 'f1638', 'f1639', 'f1640', 'f1641', 'f1642', 'f1643', 'f1644', 'f1645', 'f1646', 'f1647', 'f1648', 'f1649', 'f1650', 'f1651', 'f1652', 'f1653', 'f1654', 'f1655', 'f1656', 'f1657', 'f1658', 'f1659', 'f1660', 'f1661', 'f1662', 'f1663', 'f1664', 'f1665', 'f1666', 'f1667', 'f1668', 'f1669', 'f1670', 'f1671', 'f1672', 'f1673', 
'f1674', 'f1675', 'f1676', 'f1677', 'f1678', 'f1679', 'f1680', 'f1681', 'f1682', 'f1683', 'f1684', 'f1685', 'f1686', 'f1687', 'f1688', 'f1689', 'f1690', 'f1691', 'f1692', 'f1693', 'f1694', 'f1695', 'f1696', 'f1697', 'f1698', 'f1699', 'f1700', 'f1701', 'f1702', 'f1703', 'f1704', 'f1705', 'f1706', 'f1707', 'f1708', 'f1709', 'f1710', 'f1711', 'f1712', 'f1713', 'f1714', 'f1715', 'f1716', 'f1717', 'f1718', 'f1719', 'f1720', 'f1721', 'f1722', 'f1723', 'f1724', 'f1725', 'f1726', 'f1727', 'f1728', 'f1729', 'f1730', 'f1731', 'f1732', 'f1733', 'f1734', 'f1735', 'f1736', 'f1737', 'f1738', 'f1739', 'f1740', 'f1741', 'f1742', 'f1743', 'f1744', 'f1745', 'f1746', 'f1747', 'f1748', 'f1749', 'f1750', 'f1751', 'f1752', 'f1753', 'f1754', 'f1755', 'f1756', 'f1757', 'f1758', 'f1759', 'f1760', 'f1761', 'f1762', 'f1763', 'f1764', 'f1765', 'f1766', 'f1767', 'f1768', 'f1769', 'f1770', 'f1771', 'f1772', 'f1773', 'f1774', 'f1775', 'f1776', 'f1777', 'f1778', 'f1779', 'f1780', 'f1781', 'f1782', 'f1783', 'f1784', 'f1785', 'f1786', 'f1787', 'f1788', 'f1789', 'f1790', 'f1791', 'f1792', 'f1793', 'f1794', 'f1795', 'f1796', 'f1797', 'f1798', 'f1799', 'f1800', 'f1801', 'f1802', 'f1803', 'f1804', 'f1805', 'f1806', 'f1807', 'f1808', 'f1809', 'f1810', 'f1811', 'f1812', 'f1813', 'f1814', 'f1815', 'f1816', 'f1817', 'f1818', 'f1819', 'f1820', 'f1821', 'f1822', 'f1823', 'f1824', 'f1825', 'f1826', 'f1827', 'f1828', 'f1829', 'f1830', 'f1831', 'f1832', 'f1833', 'f1834', 'f1835', 'f1836', 'f1837', 'f1838', 'f1839', 'f1840', 'f1841', 'f1842', 'f1843', 'f1844', 'f1845', 'f1846', 'f1847', 'f1848', 'f1849', 'f1850', 'f1851', 'f1852', 'f1853', 'f1854', 'f1855', 'f1856', 'f1857', 'f1858', 'f1859', 'f1860', 'f1861', 'f1862', 'f1863', 'f1864', 'f1865', 'f1866', 'f1867', 'f1868', 'f1869', 'f1870', 'f1871', 'f1872', 'f1873', 'f1874', 'f1875', 'f1876', 'f1877', 'f1878', 'f1879', 'f1880', 'f1881', 'f1882', 'f1883', 'f1884', 'f1885', 'f1886', 'f1887', 'f1888', 'f1889', 'f1890', 'f1891', 'f1892', 'f1893', 'f1894', 'f1895', 
'f1896', 'f1897', 'f1898', 'f1899', 'f1900', 'f1901', 'f1902', 'f1903', 'f1904', 'f1905', 'f1906', 'f1907', 'f1908', 'f1909', 'f1910', 'f1911', 'f1912', 'f1913', 'f1914', 'f1915', 'f1916', 'f1917', 'f1918', 'f1919', 'f1920', 'f1921', 'f1922', 'f1923', 'f1924', 'f1925', 'f1926', 'f1927', 'f1928', 'f1929', 'f1930', 'f1931', 'f1932', 'f1933', 'f1934', 'f1935', 'f1936', 'f1937', 'f1938', 'f1939', 'f1940', 'f1941', 'f1942', 'f1943', 'f1944', 'f1945', 'f1946', 'f1947', 'f1948', 'f1949', 'f1950', 'f1951', 'f1952', 'f1953', 'f1954', 'f1955', 'f1956', 'f1957', 'f1958', 'f1959', 'f1960', 'f1961', 'f1962', 'f1963', 'f1964', 'f1965', 'f1966', 'f1967', 'f1968', 'f1969', 'f1970', 'f1971', 'f1972', 'f1973', 'f1974', 'f1975', 'f1976', 'f1977', 'f1978', 'f1979', 'f1980', 'f1981', 'f1982', 'f1983', 'f1984', 'f1985', 'f1986', 'f1987', 'f1988', 'f1989', 'f1990', 'f1991', 'f1992', 'f1993', 'f1994', 'f1995', 'f1996', 'f1997', 'f1998', 'f1999', 'f2000', 'f2001', 'f2002', 'f2003', 'f2004', 'f2005', 'f2006', 'f2007', 'f2008', 'f2009', 'f2010', 'f2011', 'f2012', 'f2013', 'f2014', 'f2015', 'f2016', 'f2017', 'f2018', 'f2019', 'f2020', 'f2021', 'f2022', 'f2023', 'f2024', 'f2025', 'f2026', 'f2027', 'f2028', 'f2029', 'f2030', 'f2031', 'f2032', 'f2033', 'f2034', 'f2035', 'f2036', 'f2037', 'f2038', 'f2039', 'f2040', 'f2041', 'f2042', 'f2043', 'f2044', 'f2045', 'f2046', 'f2047', 'f2048', 'f2049', 'f2050', 'f2051', 'f2052', 'f2053', 'f2054', 'f2055', 'f2056', 'f2057', 'f2058', 'f2059', 'f2060', 'f2061', 'f2062', 'f2063', 'f2064', 'f2065', 'f2066', 'f2067', 'f2068', 'f2069', 'f2070', 'f2071', 'f2072', 'f2073', 'f2074', 'f2075', 'f2076', 'f2077', 'f2078', 'f2079', 'f2080', 'f2081', 'f2082', 'f2083', 'f2084', 'f2085', 'f2086', 'f2087', 'f2088', 'f2089', 'f2090', 'f2091', 'f2092', 'f2093', 'f2094', 'f2095', 'f2096', 'f2097', 'f2098', 'f2099', 'f2100', 'f2101', 'f2102', 'f2103', 'f2104', 'f2105', 'f2106', 'f2107', 'f2108', 'f2109', 'f2110', 'f2111', 'f2112', 'f2113', 'f2114', 'f2115', 'f2116', 'f2117', 
'f2118', 'f2119', 'f2120', 'f2121', 'f2122', 'f2123', 'f2124', 'f2125', 'f2126', 'f2127', 'f2128', 'f2129', 'f2130', 'f2131', 'f2132', 'f2133', 'f2134', 'f2135', 'f2136', 'f2137', 'f2138', 'f2139', 'f2140', 'f2141', 'f2142', 'f2143', 'f2144', 'f2145', 'f2146', 'f2147', 'f2148', 'f2149', 'f2150', 'f2151', 'f2152', 'f2153', 'f2154', 'f2155', 'f2156', 'f2157', 'f2158', 'f2159', 'f2160', 'f2161', 'f2162', 'f2163', 'f2164', 'f2165', 'f2166', 'f2167', 'f2168', 'f2169', 'f2170', 'f2171', 'f2172', 'f2173', 'f2174', 'f2175', 'f2176', 'f2177', 'f2178', 'f2179', 'f2180', 'f2181', 'f2182', 'f2183', 'f2184', 'f2185', 'f2186', 'f2187', 'f2188', 'f2189', 'f2190', 'f2191', 'f2192', 'f2193', 'f2194', 'f2195', 'f2196', 'f2197', 'f2198', 'f2199', 'f2200', 'f2201', 'f2202', 'f2203', 'f2204', 'f2205', 'f2206', 'f2207', 'f2208', 'f2209', 'f2210', 'f2211', 'f2212', 'f2213', 'f2214', 'f2215', 'f2216', 'f2217', 'f2218', 'f2219', 'f2220', 'f2221', 'f2222', 'f2223', 'f2224', 'f2225', 'f2226', 'f2227', 'f2228', 'f2229', 'f2230', 'f2231', 'f2232', 'f2233', 'f2234', 'f2235', 'f2236', 'f2237', 'f2238', 'f2239', 'f2240', 'f2241', 'f2242', 'f2243', 'f2244', 'f2245', 'f2246', 'f2247', 'f2248', 'f2249', 'f2250', 'f2251', 'f2252', 'f2253', 'f2254', 'f2255', 'f2256', 'f2257', 'f2258', 'f2259', 'f2260', 'f2261', 'f2262', 'f2263', 'f2264', 'f2265', 'f2266', 'f2267', 'f2268', 'f2269', 'f2270', 'f2271', 'f2272', 'f2273', 'f2274', 'f2275', 'f2276', 'f2277', 'f2278', 'f2279', 'f2280', 'f2281', 'f2282', 'f2283', 'f2284', 'f2285', 'f2286', 'f2287', 'f2288', 'f2289', 'f2290', 'f2291', 'f2292', 'f2293', 'f2294', 'f2295', 'f2296', 'f2297', 'f2298', 'f2299', 'f2300', 'f2301', 'f2302', 'f2303', 'f2304', 'f2305', 'f2306', 'f2307', 'f2308', 'f2309', 'f2310', 'f2311', 'f2312', 'f2313', 'f2314', 'f2315', 'f2316', 'f2317', 'f2318', 'f2319', 'f2320', 'f2321', 'f2322', 'f2323', 'f2324', 'f2325', 'f2326', 'f2327', 'f2328', 'f2329', 'f2330', 'f2331', 'f2332', 'f2333', 'f2334', 'f2335', 'f2336', 'f2337', 'f2338', 'f2339', 
'f2340', 'f2341', 'f2342', 'f2343', 'f2344', 'f2345', 'f2346', 'f2347', 'f2348', 'f2349', 'f2350', 'f2351', 'f2352', 'f2353', 'f2354', 'f2355', 'f2356', 'f2357', 'f2358', 'f2359', 'f2360', 'f2361', 'f2362', 'f2363', 'f2364', 'f2365', 'f2366', 'f2367', 'f2368', 'f2369', 'f2370', 'f2371', 'f2372', 'f2373', 'f2374', 'f2375', 'f2376', 'f2377', 'f2378', 'f2379', 'f2380', 'f2381', 'f2382', 'f2383', 'f2384', 'f2385', 'f2386', 'f2387', 'f2388', 'f2389', 'f2390', 'f2391', 'f2392', 'f2393', 'f2394', 'f2395', 'f2396', 'f2397', 'f2398', 'f2399', 'f2400', 'f2401', 'f2402', 'f2403', 'f2404', 'f2405', 'f2406', 'f2407', 'f2408', 'f2409', 'f2410', 'f2411', 'f2412', 'f2413', 'f2414', 'f2415', 'f2416', 'f2417', 'f2418', 'f2419', 'f2420', 'f2421', 'f2422', 'f2423', 'f2424', 'f2425', 'f2426', 'f2427', 'f2428', 'f2429', 'f2430', 'f2431', 'f2432', 'f2433', 'f2434', 'f2435', 'f2436', 'f2437', 'f2438', 'f2439', 'f2440', 'f2441', 'f2442', 'f2443', 'f2444', 'f2445', 'f2446', 'f2447', 'f2448', 'f2449', 'f2450', 'f2451', 'f2452', 'f2453', 'f2454', 'f2455', 'f2456', 'f2457', 'f2458', 'f2459', 'f2460', 'f2461', 'f2462', 'f2463', 'f2464', 'f2465', 'f2466', 'f2467', 'f2468', 'f2469', 'f2470', 'f2471', 'f2472', 'f2473', 'f2474', 'f2475', 'f2476', 'f2477', 'f2478', 'f2479', 'f2480', 'f2481', 'f2482', 'f2483', 'f2484', 'f2485', 'f2486', 'f2487', 'f2488', 'f2489', 'f2490', 'f2491', 'f2492', 'f2493', 'f2494', 'f2495', 'f2496', 'f2497', 'f2498', 'f2499', 'f2500', 'f2501', 'f2502', 'f2503', 'f2504', 'f2505', 'f2506', 'f2507', 'f2508', 'f2509', 'f2510', 'f2511', 'f2512', 'f2513', 'f2514', 'f2515', 'f2516', 'f2517', 'f2518', 'f2519', 'f2520', 'f2521', 'f2522', 'f2523', 'f2524', 'f2525', 'f2526', 'f2527', 'f2528', 'f2529', 'f2530', 'f2531', 'f2532', 'f2533', 'f2534', 'f2535', 'f2536', 'f2537', 'f2538', 'f2539', 'f2540', 'f2541', 'f2542', 'f2543', 'f2544', 'f2545', 'f2546', 'f2547', 'f2548', 'f2549', 'f2550', 'f2551', 'f2552', 'f2553', 'f2554', 'f2555', 'f2556', 'f2557', 'f2558', 'f2559', 'f2560', 'f2561', 
'f2562', 'f2563', 'f2564', 'f2565', 'f2566', 'f2567', 'f2568', 'f2569', 'f2570', 'f2571', 'f2572', 'f2573', 'f2574', 'f2575', 'f2576', 'f2577', 'f2578', 'f2579', 'f2580'] ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27', 'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41', 'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54', 'f55', 'f56', 'f57', 'f58', 'f59', 'f60', 'f61', 'f62', 'f63', 'f64', 'f65', 'f66', 'f67', 'f68', 'f69', 'f70', 'f71', 'f72', 'f73', 'f74', 'f75', 'f76', 'f77', 'f78', 'f79', 'f80', 'f81', 'f82', 'f83', 'f84', 'f85', 'f86', 'f87', 'f88', 'f89', 'f90', 'f91', 'f92', 'f93', 'f94', 'f95', 'f96', 'f97', 'f98', 'f99', 'f100', 'f101', 'f102', 'f103', 'f104', 'f105', 'f106', 'f107', 'f108', 'f109', 'f110', 'f111', 'f112', 'f113', 'f114', 'f115', 'f116', 'f117', 'f118', 'f119', 'f120', 'f121', 'f122', 'f123', 'f124', 'f125', 'f126', 'f127', 'f128', 'f129', 'f130', 'f131', 'f132', 'f133', 'f134', 'f135', 'f136', 'f137', 'f138', 'f139', 'f140', 'f141', 'f142', 'f143', 'f144', 'f145', 'f146', 'f147', 'f148', 'f149', 'f150', 'f151', 'f152', 'f153', 'f154', 'f155', 'f156', 'f157', 'f158', 'f159', 'f160', 'f161', 'f162', 'f163', 'f164', 'f165', 'f166', 'f167', 'f168', 'f169', 'f170', 'f171', 'f172', 'f173', 'f174', 'f175', 'f176', 'f177', 'f178', 'f179', 'f180', 'f181', 'f182', 'f183', 'f184', 'f185', 'f186', 'f187', 'f188', 'f189', 'f190', 'f191', 'f192', 'f193', 'f194', 'f195', 'f196', 'f197', 'f198', 'f199', 'f200', 'f201', 'f202', 'f203', 'f204', 'f205', 'f206', 'f207', 'f208', 'f209', 'f210', 'f211', 'f212', 'f213', 'f214', 'f215', 'f216', 'f217', 'f218', 'f219', 'f220', 'f221', 'f222', 'f223', 'f224', 'f225', 'f226', 'f227', 'f228', 'f229', 'f230', 'f231', 'f232', 'f233', 'f234', 'f235', 'f236', 'f237', 'f238', 'f239', 'f240', 'f241', 
'f242', 'f243', 'f244', 'f245', 'f246', 'f247', 'f248', 'f249', 'f250', 'f251', 'f252', 'f253', 'f254', 'f255', 'f256', 'f257', 'f258', 'f259', 'f260', 'f261', 'f262', 'f263', 'f264', 'f265', 'f266', 'f267', 'f268', 'f269', 'f270', 'f271', 'f272', 'f273', 'f274', 'f275', 'f276', 'f277', 'f278', 'f279', 'f280', 'f281', 'f282', 'f283', 'f284', 'f285', 'f286', 'f287', 'f288', 'f289', 'f290', 'f291', 'f292', 'f293', 'f294', 'f295', 'f296', 'f297', 'f298', 'f299', 'f300', 'f301', 'f302', 'f303', 'f304', 'f305', 'f306', 'f307', 'f308', 'f309', 'f310', 'f311', 'f312', 'f313', 'f314', 'f315', 'f316', 'f317', 'f318', 'f319', 'f320', 'f321', 'f322', 'f323', 'f324', 'f325', 'f326', 'f327', 'f328', 'f329', 'f330', 'f331', 'f332', 'f333', 'f334', 'f335', 'f336', 'f337', 'f338', 'f339', 'f340', 'f341', 'f342', 'f343', 'f344', 'f345', 'f346', 'f347', 'f348', 'f349', 'f350', 'f351', 'f352', 'f353', 'f354', 'f355', 'f356', 'f357', 'f358', 'f359', 'f360', 'f361', 'f362', 'f363', 'f364', 'f365', 'f366', 'f367', 'f368', 'f369', 'f370', 'f371', 'f372', 'f373', 'f374', 'f375', 'f376', 'f377', 'f378', 'f379', 'f380', 'f381', 'f382', 'f383', 'f384', 'f385', 'f386', 'f387', 'f388', 'f389', 'f390', 'f391', 'f392', 'f393', 'f394', 'f395', 'f396', 'f397', 'f398', 'f399', 'f400', 'f401', 'f402', 'f403', 'f404', 'f405', 'f406', 'f407', 'f408', 'f409', 'f410', 'f411', 'f412', 'f413', 'f414', 'f415', 'f416', 'f417', 'f418', 'f419', 'f420', 'f421', 'f422', 'f423', 'f424', 'f425', 'f426', 'f427', 'f428', 'f429', 'f430', 'f431', 'f432', 'f433', 'f434', 'f435', 'f436', 'f437', 'f438', 'f439', 'f440', 'f441', 'f442', 'f443', 'f444', 'f445', 'f446', 'f447', 'f448', 'f449', 'f450', 'f451', 'f452', 'f453', 'f454', 'f455', 'f456', 'f457', 'f458', 'f459', 'f460', 'f461', 'f462', 'f463', 'f464', 'f465', 'f466', 'f467', 'f468', 'f469', 'f470', 'f471', 'f472', 'f473', 'f474', 'f475', 'f476', 'f477', 'f478', 'f479', 'f480', 'f481', 'f482', 'f483', 'f484', 'f485', 'f486', 'f487', 'f488', 'f489', 'f490', 'f491', 
'f492', 'f493', 'f494', 'f495', 'f496', 'f497', 'f498', 'f499', 'f500', 'f501', 'f502', 'f503', 'f504', 'f505', 'f506', 'f507', 'f508', 'f509', 'f510', 'f511', 'f512', 'f513', 'f514', 'f515', 'f516', 'f517', 'f518', 'f519', 'f520', 'f521', 'f522', 'f523', 'f524', 'f525', 'f526', 'f527', 'f528', 'f529', 'f530', 'f531', 'f532', 'f533', 'f534', 'f535', 'f536', 'f537', 'f538', 'f539', 'f540', 'f541', 'f542', 'f543', 'f544', 'f545', 'f546', 'f547', 'f548', 'f549', 'f550', 'f551', 'f552', 'f553', 'f554', 'f555', 'f556', 'f557', 'f558', 'f559', 'f560', 'f561', 'f562', 'f563', 'f564', 'f565', 'f566', 'f567', 'f568', 'f569', 'f570', 'f571', 'f572', 'f573', 'f574', 'f575', 'f576', 'f577', 'f578', 'f579', 'f580', 'f581', 'f582', 'f583', 'f584', 'f585', 'f586', 'f587', 'f588', 'f589', 'f590', 'f591', 'f592', 'f593', 'f594', 'f595', 'f596', 'f597', 'f598', 'f599', 'f600', 'f601', 'f602', 'f603', 'f604', 'f605', 'f606', 'f607', 'f608', 'f609', 'f610', 'f611', 'f612', 'f613', 'f614', 'f615', 'f616', 'f617', 'f618', 'f619', 'f620', 'f621', 'f622', 'f623', 'f624', 'f625', 'f626', 'f627', 'f628', 'f629', 'f630', 'f631', 'f632', 'f633', 'f634', 'f635', 'f636', 'f637', 'f638', 'f639', 'f640', 'f641', 'f642', 'f643', 'f644', 'f645', 'f646', 'f647', 'f648', 'f649', 'f650', 'f651', 'f652', 'f653', 'f654', 'f655', 'f656', 'f657', 'f658', 'f659', 'f660', 'f661', 'f662', 'f663', 'f664', 'f665', 'f666', 'f667', 'f668', 'f669', 'f670', 'f671', 'f672', 'f673', 'f674', 'f675', 'f676', 'f677', 'f678', 'f679', 'f680', 'f681', 'f682', 'f683', 'f684', 'f685', 'f686', 'f687', 'f688', 'f689', 'f690', 'f691', 'f692', 'f693', 'f694', 'f695', 'f696', 'f697', 'f698', 'f699', 'f700', 'f701', 'f702', 'f703', 'f704', 'f705', 'f706', 'f707', 'f708', 'f709', 'f710', 'f711', 'f712', 'f713', 'f714', 'f715', 'f716', 'f717', 'f718', 'f719', 'f720', 'f721', 'f722', 'f723', 'f724', 'f725', 'f726', 'f727', 'f728', 'f729', 'f730', 'f731', 'f732', 'f733', 'f734', 'f735', 'f736', 'f737', 'f738', 'f739', 'f740', 'f741', 
'f742', 'f743', 'f744', 'f745', 'f746', 'f747', 'f748', 'f749', 'f750', 'f751', 'f752', 'f753', 'f754', 'f755', 'f756', 'f757', 'f758', 'f759', 'f760', 'f761', 'f762', 'f763', 'f764', 'f765', 'f766', 'f767', 'f768', 'f769', 'f770', 'f771', 'f772', 'f773', 'f774', 'f775', 'f776', 'f777', 'f778', 'f779', 'f780', 'f781', 'f782', 'f783', 'f784', 'f785', 'f786', 'f787', 'f788', 'f789', 'f790', 'f791', 'f792', 'f793', 'f794', 'f795', 'f796', 'f797', 'f798', 'f799', 'f800', 'f801', 'f802', 'f803', 'f804', 'f805', 'f806', 'f807', 'f808', 'f809', 'f810', 'f811', 'f812', 'f813', 'f814', 'f815', 'f816', 'f817', 'f818', 'f819', 'f820', 'f821', 'f822', 'f823', 'f824', 'f825', 'f826', 'f827', 'f828', 'f829', 'f830', 'f831', 'f832', 'f833', 'f834', 'f835', 'f836', 'f837', 'f838', 'f839', 'f840', 'f841', 'f842', 'f843', 'f844', 'f845', 'f846', 'f847', 'f848', 'f849', 'f850', 'f851', 'f852', 'f853', 'f854', 'f855', 'f856', 'f857', 'f858', 'f859', 'f860', 'f861', 'f862', 'f863', 'f864', 'f865', 'f866', 'f867', 'f868', 'f869', 'f870', 'f871', 'f872', 'f873', 'f874', 'f875', 'f876', 'f877', 'f878', 'f879', 'f880', 'f881', 'f882', 'f883', 'f884', 'f885', 'f886', 'f887', 'f888', 'f889', 'f890', 'f891', 'f892', 'f893', 'f894', 'f895', 'f896', 'f897', 'f898', 'f899', 'f900', 'f901', 'f902', 'f903', 'f904', 'f905', 'f906', 'f907', 'f908', 'f909', 'f910', 'f911', 'f912', 'f913', 'f914', 'f915', 'f916', 'f917', 'f918', 'f919', 'f920', 'f921', 'f922', 'f923', 'f924', 'f925', 'f926', 'f927', 'f928', 'f929', 'f930', 'f931', 'f932', 'f933', 'f934', 'f935', 'f936', 'f937', 'f938', 'f939', 'f940', 'f941', 'f942', 'f943', 'f944', 'f945', 'f946', 'f947', 'f948', 'f949', 'f950', 'f951', 'f952', 'f953', 'f954', 'f955', 'f956', 'f957', 'f958', 'f959', 'f960', 'f961', 'f962', 'f963', 'f964', 'f965', 'f966', 'f967', 'f968', 'f969', 'f970', 'f971', 'f972', 'f973', 'f974', 'f975', 'f976', 'f977', 'f978', 'f979', 'f980', 'f981', 'f982', 'f983', 'f984', 'f985', 'f986', 'f987', 'f988', 'f989', 'f990', 'f991', 
'f992', 'f993', 'f994', 'f995', 'f996', 'f997', 'f998', 'f999', 'f1000', 'f1001', 'f1002', 'f1003', 'f1004', 'f1005', 'f1006', 'f1007', 'f1008', 'f1009', 'f1010', 'f1011', 'f1012', 'f1013', 'f1014', 'f1015', 'f1016', 'f1017', 'f1018', 'f1019', 'f1020', 'f1021', 'f1022', 'f1023', 'f1024', 'f1025', 'f1026', 'f1027', 'f1028', 'f1029', 'f1030', 'f1031', 'f1032', 'f1033', 'f1034', 'f1035', 'f1036', 'f1037', 'f1038', 'f1039', 'f1040', 'f1041', 'f1042', 'f1043', 'f1044', 'f1045', 'f1046', 'f1047', 'f1048', 'f1049', 'f1050', 'f1051', 'f1052', 'f1053', 'f1054', 'f1055', 'f1056', 'f1057', 'f1058', 'f1059', 'f1060', 'f1061', 'f1062', 'f1063', 'f1064', 'f1065', 'f1066', 'f1067', 'f1068', 'f1069', 'f1070', 'f1071', 'f1072', 'f1073', 'f1074', 'f1075', 'f1076', 'f1077', 'f1078', 'f1079', 'f1080', 'f1081', 'f1082', 'f1083', 'f1084', 'f1085', 'f1086', 'f1087', 'f1088', 'f1089', 'f1090', 'f1091', 'f1092', 'f1093', 'f1094', 'f1095', 'f1096', 'f1097', 'f1098', 'f1099', 'f1100', 'f1101', 'f1102', 'f1103', 'f1104', 'f1105', 'f1106', 'f1107', 'f1108', 'f1109', 'f1110', 'f1111', 'f1112', 'f1113', 'f1114', 'f1115', 'f1116', 'f1117', 'f1118', 'f1119', 'f1120', 'f1121', 'f1122', 'f1123', 'f1124', 'f1125', 'f1126', 'f1127', 'f1128', 'f1129', 'f1130', 'f1131', 'f1132', 'f1133', 'f1134', 'f1135', 'f1136', 'f1137', 'f1138', 'f1139', 'f1140', 'f1141', 'f1142', 'f1143', 'f1144', 'f1145', 'f1146', 'f1147', 'f1148', 'f1149', 'f1150', 'f1151', 'f1152', 'f1153', 'f1154', 'f1155', 'f1156', 'f1157', 'f1158', 'f1159', 'f1160', 'f1161', 'f1162', 'f1163', 'f1164', 'f1165', 'f1166', 'f1167', 'f1168', 'f1169', 'f1170', 'f1171', 'f1172', 'f1173', 'f1174', 'f1175', 'f1176', 'f1177', 'f1178', 'f1179', 'f1180', 'f1181', 'f1182', 'f1183', 'f1184', 'f1185', 'f1186', 'f1187', 'f1188', 'f1189', 'f1190', 'f1191', 'f1192', 'f1193', 'f1194', 'f1195', 'f1196', 'f1197', 'f1198', 'f1199', 'f1200', 'f1201', 'f1202', 'f1203', 'f1204', 'f1205', 'f1206', 'f1207', 'f1208', 'f1209', 'f1210', 'f1211', 'f1212', 'f1213', 'f1214', 
'f1215', 'f1216', 'f1217', 'f1218', 'f1219', 'f1220', 'f1221', 'f1222', 'f1223', 'f1224', 'f1225', 'f1226', 'f1227', 'f1228', 'f1229', 'f1230', 'f1231', 'f1232', 'f1233', 'f1234', 'f1235', 'f1236', 'f1237', 'f1238', 'f1239', 'f1240', 'f1241', 'f1242', 'f1243', 'f1244', 'f1245', 'f1246', 'f1247', 'f1248', 'f1249', 'f1250', 'f1251', 'f1252', 'f1253', 'f1254', 'f1255', 'f1256', 'f1257', 'f1258', 'f1259', 'f1260', 'f1261', 'f1262', 'f1263', 'f1264', 'f1265', 'f1266', 'f1267', 'f1268', 'f1269', 'f1270', 'f1271', 'f1272', 'f1273', 'f1274', 'f1275', 'f1276', 'f1277', 'f1278', 'f1279', 'f1280', 'f1281', 'f1282', 'f1283', 'f1284', 'f1285', 'f1286', 'f1287', 'f1288', 'f1289', 'f1290', 'f1291', 'f1292', 'f1293', 'f1294', 'f1295', 'f1296', 'f1297', 'f1298', 'f1299', 'f1300', 'f1301', 'f1302', 'f1303', 'f1304', 'f1305', 'f1306', 'f1307', 'f1308', 'f1309', 'f1310', 'f1311', 'f1312', 'f1313', 'f1314', 'f1315', 'f1316', 'f1317', 'f1318', 'f1319', 'f1320', 'f1321', 'f1322', 'f1323', 'f1324', 'f1325', 'f1326', 'f1327', 'f1328', 'f1329', 'f1330', 'f1331', 'f1332', 'f1333', 'f1334', 'f1335', 'f1336', 'f1337', 'f1338', 'f1339', 'f1340', 'f1341', 'f1342', 'f1343', 'f1344', 'f1345', 'f1346', 'f1347', 'f1348', 'f1349', 'f1350', 'f1351', 'f1352', 'f1353', 'f1354', 'f1355', 'f1356', 'f1357', 'f1358', 'f1359', 'f1360', 'f1361', 'f1362', 'f1363', 'f1364', 'f1365', 'f1366', 'f1367', 'f1368', 'f1369', 'f1370', 'f1371', 'f1372', 'f1373', 'f1374', 'f1375', 'f1376', 'f1377', 'f1378', 'f1379', 'f1380', 'f1381', 'f1382', 'f1383', 'f1384', 'f1385', 'f1386', 'f1387', 'f1388', 'f1389', 'f1390', 'f1391', 'f1392', 'f1393', 'f1394', 'f1395', 'f1396', 'f1397', 'f1398', 'f1399', 'f1400', 'f1401', 'f1402', 'f1403', 'f1404', 'f1405', 'f1406', 'f1407', 'f1408', 'f1409', 'f1410', 'f1411', 'f1412', 'f1413', 'f1414', 'f1415', 'f1416', 'f1417', 'f1418', 'f1419', 'f1420', 'f1421', 'f1422', 'f1423', 'f1424', 'f1425', 'f1426', 'f1427', 'f1428', 'f1429', 'f1430', 'f1431', 'f1432', 'f1433', 'f1434', 'f1435', 'f1436', 
'f1437', 'f1438', 'f1439', 'f1440', 'f1441', 'f1442', 'f1443', 'f1444', 'f1445', 'f1446', 'f1447', 'f1448', 'f1449', 'f1450', 'f1451', 'f1452', 'f1453', 'f1454', 'f1455', 'f1456', 'f1457', 'f1458', 'f1459', 'f1460', 'f1461', 'f1462', 'f1463', 'f1464', 'f1465', 'f1466', 'f1467', 'f1468', 'f1469', 'f1470', 'f1471', 'f1472', 'f1473', 'f1474', 'f1475', 'f1476', 'f1477', 'f1478', 'f1479', 'f1480', 'f1481', 'f1482', 'f1483', 'f1484', 'f1485', 'f1486', 'f1487', 'f1488', 'f1489', 'f1490', 'f1491', 'f1492', 'f1493', 'f1494', 'f1495', 'f1496', 'f1497', 'f1498', 'f1499', 'f1500', 'f1501', 'f1502', 'f1503', 'f1504', 'f1505', 'f1506', 'f1507', 'f1508', 'f1509', 'f1510', 'f1511', 'f1512', 'f1513', 'f1514', 'f1515', 'f1516', 'f1517', 'f1518', 'f1519', 'f1520', 'f1521', 'f1522', 'f1523', 'f1524', 'f1525', 'f1526', 'f1527', 'f1528', 'f1529', 'f1530', 'f1531', 'f1532', 'f1533', 'f1534', 'f1535', 'f1536', 'f1537', 'f1538', 'f1539', 'f1540', 'f1541', 'f1542', 'f1543', 'f1544', 'f1545', 'f1546', 'f1547', 'f1548', 'f1549', 'f1550', 'f1551', 'f1552', 'f1553', 'f1554', 'f1555', 'f1556', 'f1557', 'f1558', 'f1559', 'f1560', 'f1561', 'f1562', 'f1563', 'f1564', 'f1565', 'f1566', 'f1567', 'f1568', 'f1569', 'f1570', 'f1571', 'f1572', 'f1573', 'f1574', 'f1575', 'f1576', 'f1577', 'f1578', 'f1579', 'f1580', 'f1581', 'f1582', 'f1583', 'f1584', 'f1585', 'f1586', 'f1587', 'f1588', 'f1589', 'f1590', 'f1591', 'f1592', 'f1593', 'f1594', 'f1595', 'f1596', 'f1597', 'f1598', 'f1599', 'f1600', 'f1601', 'f1602', 'f1603', 'f1604', 'f1605', 'f1606', 'f1607', 'f1608', 'f1609', 'f1610', 'f1611', 'f1612', 'f1613', 'f1614', 'f1615', 'f1616', 'f1617', 'f1618', 'f1619', 'f1620', 'f1621', 'f1622', 'f1623', 'f1624', 'f1625', 'f1626', 'f1627', 'f1628', 'f1629', 'f1630', 'f1631', 'f1632', 'f1633', 'f1634', 'f1635', 'f1636', 'f1637', 'f1638', 'f1639', 'f1640', 'f1641', 'f1642', 'f1643', 'f1644', 'f1645', 'f1646', 'f1647', 'f1648', 'f1649', 'f1650', 'f1651', 'f1652', 'f1653', 'f1654', 'f1655', 'f1656', 'f1657', 'f1658', 
'f1659', 'f1660', 'f1661', 'f1662', 'f1663', 'f1664', 'f1665', 'f1666', 'f1667', 'f1668', 'f1669', 'f1670', 'f1671', 'f1672', 'f1673', 'f1674', 'f1675', 'f1676', 'f1677', 'f1678', 'f1679', 'f1680', 'f1681', 'f1682', 'f1683', 'f1684', 'f1685', 'f1686', 'f1687', 'f1688', 'f1689', 'f1690', 'f1691', 'f1692', 'f1693', 'f1694', 'f1695', 'f1696', 'f1697', 'f1698', 'f1699', 'f1700', 'f1701', 'f1702', 'f1703', 'f1704', 'f1705', 'f1706', 'f1707', 'f1708', 'f1709', 'f1710', 'f1711', 'f1712', 'f1713', 'f1714', 'f1715', 'f1716', 'f1717', 'f1718', 'f1719', 'f1720', 'f1721', 'f1722', 'f1723', 'f1724', 'f1725', 'f1726', 'f1727', 'f1728', 'f1729', 'f1730', 'f1731', 'f1732', 'f1733', 'f1734', 'f1735', 'f1736', 'f1737', 'f1738', 'f1739', 'f1740', 'f1741', 'f1742', 'f1743', 'f1744', 'f1745', 'f1746', 'f1747', 'f1748', 'f1749', 'f1750', 'f1751', 'f1752', 'f1753', 'f1754', 'f1755', 'f1756', 'f1757', 'f1758', 'f1759', 'f1760', 'f1761', 'f1762', 'f1763', 'f1764', 'f1765', 'f1766', 'f1767', 'f1768', 'f1769', 'f1770', 'f1771', 'f1772', 'f1773', 'f1774', 'f1775', 'f1776', 'f1777', 'f1778', 'f1779', 'f1780', 'f1781', 'f1782', 'f1783', 'f1784', 'f1785', 'f1786', 'f1787', 'f1788', 'f1789', 'f1790', 'f1791', 'f1792', 'f1793', 'f1794', 'f1795', 'f1796', 'f1797', 'f1798', 'f1799', 'f1800', 'f1801', 'f1802', 'f1803', 'f1804', 'f1805', 'f1806', 'f1807', 'f1808', 'f1809', 'f1810', 'f1811', 'f1812', 'f1813', 'f1814', 'f1815', 'f1816', 'f1817', 'f1818', 'f1819', 'f1820', 'f1821', 'f1822', 'f1823', 'f1824', 'f1825', 'f1826', 'f1827', 'f1828', 'f1829', 'f1830', 'f1831', 'f1832', 'f1833', 'f1834', 'f1835', 'f1836', 'f1837', 'f1838', 'f1839', 'f1840', 'f1841', 'f1842', 'f1843', 'f1844', 'f1845', 'f1846', 'f1847', 'f1848', 'f1849', 'f1850', 'f1851', 'f1852', 'f1853', 'f1854', 'f1855', 'f1856', 'f1857', 'f1858', 'f1859', 'f1860', 'f1861', 'f1862', 'f1863', 'f1864', 'f1865', 'f1866', 'f1867', 'f1868', 'f1869', 'f1870', 'f1871', 'f1872', 'f1873', 'f1874', 'f1875', 'f1876', 'f1877', 'f1878', 'f1879', 'f1880', 
'f1881', 'f1882', 'f1883', 'f1884', 'f1885', 'f1886', 'f1887', 'f1888', 'f1889', 'f1890', 'f1891', 'f1892', 'f1893', 'f1894', 'f1895', 'f1896', 'f1897', 'f1898', 'f1899', 'f1900', 'f1901', 'f1902', 'f1903', 'f1904', 'f1905', 'f1906', 'f1907', 'f1908', 'f1909', 'f1910', 'f1911', 'f1912', 'f1913', 'f1914', 'f1915', 'f1916', 'f1917', 'f1918', 'f1919', 'f1920', 'f1921', 'f1922', 'f1923', 'f1924', 'f1925', 'f1926', 'f1927', 'f1928', 'f1929', 'f1930', 'f1931', 'f1932', 'f1933', 'f1934', 'f1935', 'f1936', 'f1937', 'f1938', 'f1939', 'f1940', 'f1941', 'f1942', 'f1943', 'f1944', 'f1945', 'f1946', 'f1947', 'f1948', 'f1949', 'f1950', 'f1951', 'f1952', 'f1953', 'f1954', 'f1955', 'f1956', 'f1957', 'f1958', 'f1959', 'f1960', 'f1961', 'f1962', 'f1963', 'f1964', 'f1965', 'f1966', 'f1967', 'f1968', 'f1969', 'f1970', 'f1971', 'f1972', 'f1973', 'f1974', 'f1975', 'f1976', 'f1977', 'f1978', 'f1979', 'f1980', 'f1981', 'f1982', 'f1983', 'f1984', 'f1985', 'f1986', 'f1987', 'f1988', 'f1989', 'f1990', 'f1991', 'f1992', 'f1993', 'f1994', 'f1995', 'f1996', 'f1997', 'f1998', 'f1999', 'f2000', 'f2001', 'f2002', 'f2003', 'f2004', 'f2005', 'f2006', 'f2007', 'f2008', 'f2009', 'f2010', 'f2011', 'f2012', 'f2013', 'f2014', 'f2015', 'f2016', 'f2017', 'f2018', 'f2019', 'f2020', 'f2021', 'f2022', 'f2023', 'f2024', 'f2025', 'f2026', 'f2027', 'f2028', 'f2029', 'f2030', 'f2031', 'f2032', 'f2033', 'f2034', 'f2035', 'f2036', 'f2037', 'f2038', 'f2039', 'f2040', 'f2041', 'f2042', 'f2043', 'f2044', 'f2045', 'f2046', 'f2047', 'f2048', 'f2049', 'f2050', 'f2051', 'f2052', 'f2053', 'f2054', 'f2055', 'f2056', 'f2057', 'f2058', 'f2059', 'f2060', 'f2061', 'f2062', 'f2063', 'f2064', 'f2065', 'f2066', 'f2067', 'f2068', 'f2069', 'f2070', 'f2071', 'f2072', 'f2073', 'f2074', 'f2075', 'f2076', 'f2077', 'f2078', 'f2079', 'f2080', 'f2081', 'f2082', 'f2083', 'f2084', 'f2085', 'f2086', 'f2087', 'f2088', 'f2089', 'f2090', 'f2091', 'f2092', 'f2093', 'f2094', 'f2095', 'f2096', 'f2097', 'f2098', 'f2099', 'f2100', 'f2101', 'f2102', 
'f2103', 'f2104', 'f2105', 'f2106', 'f2107', 'f2108', 'f2109', 'f2110', 'f2111', 'f2112', 'f2113', 'f2114', 'f2115', 'f2116', 'f2117', 'f2118', 'f2119', 'f2120', 'f2121', 'f2122', 'f2123', 'f2124', 'f2125', 'f2126', 'f2127', 'f2128', 'f2129', 'f2130', 'f2131', 'f2132', 'f2133', 'f2134', 'f2135', 'f2136', 'f2137', 'f2138', 'f2139', 'f2140', 'f2141', 'f2142', 'f2143', 'f2144', 'f2145', 'f2146', 'f2147', 'f2148', 'f2149', 'f2150', 'f2151', 'f2152', 'f2153', 'f2154', 'f2155', 'f2156', 'f2157', 'f2158', 'f2159', 'f2160', 'f2161', 'f2162', 'f2163', 'f2164', 'f2165', 'f2166', 'f2167', 'f2168', 'f2169', 'f2170', 'f2171', 'f2172', 'f2173', 'f2174', 'f2175', 'f2176', 'f2177', 'f2178', 'f2179', 'f2180', 'f2181', 'f2182', 'f2183', 'f2184', 'f2185', 'f2186', 'f2187', 'f2188', 'f2189', 'f2190', 'f2191', 'f2192', 'f2193', 'f2194', 'f2195', 'f2196', 'f2197', 'f2198', 'f2199', 'f2200', 'f2201', 'f2202', 'f2203', 'f2204', 'f2205', 'f2206', 'f2207', 'f2208', 'f2209', 'f2210', 'f2211', 'f2212', 'f2213', 'f2214', 'f2215', 'f2216', 'f2217', 'f2218', 'f2219', 'f2220', 'f2221', 'f2222', 'f2223', 'f2224', 'f2225', 'f2226', 'f2227', 'f2228', 'f2229', 'f2230', 'f2231', 'f2232', 'f2233', 'f2234', 'f2235', 'f2236', 'f2237', 'f2238', 'f2239', 'f2240', 'f2241', 'f2242', 'f2243', 'f2244', 'f2245', 'f2246', 'f2247', 'f2248', 'f2249', 'f2250', 'f2251', 'f2252', 'f2253', 'f2254', 'f2255', 'f2256', 'f2257', 'f2258', 'f2259', 'f2260', 'f2261', 'f2262', 'f2263', 'f2264', 'f2265', 'f2266', 'f2267', 'f2268', 'f2269', 'f2270', 'f2271', 'f2272', 'f2273', 'f2274', 'f2275', 'f2276', 'f2277', 'f2278', 'f2279', 'f2280', 'f2281', 'f2282', 'f2283', 'f2284', 'f2285', 'f2286', 'f2287', 'f2288', 'f2289', 'f2290', 'f2291', 'f2292', 'f2293', 'f2294', 'f2295', 'f2296', 'f2297', 'f2298', 'f2299', 'f2300', 'f2301', 'f2302', 'f2303', 'f2304', 'f2305', 'f2306', 'f2307', 'f2308', 'f2309', 'f2310', 'f2311', 'f2312', 'f2313', 'f2314', 'f2315', 'f2316', 'f2317', 'f2318', 'f2319', 'f2320', 'f2321', 'f2322', 'f2323', 'f2324', 
'f2325', 'f2326', 'f2327', 'f2328', 'f2329', 'f2330', 'f2331', 'f2332', 'f2333', 'f2334', 'f2335', 'f2336', 'f2337', 'f2338', 'f2339', 'f2340', 'f2341', 'f2342', 'f2343', 'f2344', 'f2345', 'f2346', 'f2347', 'f2348', 'f2349', 'f2350', 'f2351', 'f2352', 'f2353', 'f2354', 'f2355', 'f2356', 'f2357', 'f2358', 'f2359', 'f2360', 'f2361', 'f2362', 'f2363', 'f2364', 'f2365', 'f2366', 'f2367', 'f2368', 'f2369', 'f2370', 'f2371', 'f2372', 'f2373', 'f2374', 'f2375', 'f2376', 'f2377', 'f2378', 'f2379', 'f2380', 'f2381', 'f2382', 'f2383', 'f2384', 'f2385', 'f2386', 'f2387', 'f2388', 'f2389', 'f2390', 'f2391', 'f2392', 'f2393', 'f2394', 'f2395', 'f2396', 'f2397', 'f2398', 'f2399', 'f2400', 'f2401', 'f2402', 'f2403', 'f2404', 'f2405', 'f2406', 'f2407', 'f2408', 'f2409', 'f2410', 'f2411', 'f2412', 'f2413', 'f2414', 'f2415', 'f2416', 'f2417', 'f2418', 'f2419', 'f2420', 'f2421', 'f2422', 'f2423', 'f2424', 'f2425', 'f2426', 'f2427', 'f2428', 'f2429', 'f2430', 'f2431', 'f2432', 'f2433', 'f2434', 'f2435', 'f2436', 'f2437', 'f2438', 'f2439', 'f2440', 'f2441', 'f2442', 'f2443', 'f2444', 'f2445', 'f2446', 'f2447', 'f2448', 'f2449', 'f2450', 'f2451', 'f2452', 'f2453', 'f2454', 'f2455', 'f2456', 'f2457', 'f2458', 'f2459', 'f2460', 'f2461', 'f2462', 'f2463', 'f2464', 'f2465', 'f2466', 'f2467', 'f2468', 'f2469', 'f2470', 'f2471', 'f2472', 'f2473', 'f2474', 'f2475', 'f2476', 'f2477', 'f2478', 'f2479', 'f2480', 'f2481', 'f2482', 'f2483', 'f2484', 'f2485', 'f2486', 'f2487', 'f2488', 'f2489', 'f2490', 'f2491', 'f2492', 'f2493', 'f2494', 'f2495', 'f2496', 'f2497', 'f2498', 'f2499', 'f2500', 'f2501', 'f2502', 'f2503', 'f2504', 'f2505', 'f2506', 'f2507', 'f2508', 'f2509', 'f2510', 'f2511', 'f2512', 'f2513', 'f2514', 'f2515', 'f2516', 'f2517', 'f2518', 'f2519', 'f2520', 'f2521', 'f2522', 'f2523', 'f2524', 'f2525', 'f2526', 'f2527', 'f2528', 'f2529', 'f2530', 'f2531', 'f2532', 'f2533', 'f2534', 'f2535', 'f2536', 'f2537', 'f2538', 'f2539', 'f2540', 'f2541', 'f2542', 'f2543', 'f2544', 'f2545', 'f2546', 
'f2547', 'f2548', 'f2549', 'f2550', 'f2551', 'f2552', 'f2553', 'f2554', 'f2555', 'f2556', 'f2557', 'f2558', 'f2559', 'f2560']\n",
"expected f2569, f2567, f2575, f2578, f2572, f2562, f2577, f2566, f2571, f2565, f2568, f2563, f2580, f2576, f2564, f2570, f2579, f2573, f2574, f2561 in input data\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:14,615:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.364754 15: 0.377049 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.368852 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.372951 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [13, 4, 4, 13, 4, 13, 37, 7, 12, 0, 10, 4, 4, 1, 3, 8, 4, 2, 4, 4, 4, 4, 1, 0, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 3, 2, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4]\n",
"\tWeights: [ 0.04 0.06 0.06 0.08 0.58 0. 0. 0.02 0.04 0. 0.02 0.\n",
" 0.02 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 37) (1, 39) (1, 40) (1, 65) (1, 67) (1, 69) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:14,623:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:57:14,624:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.250947 seconds\n",
"[INFO] [2016-08-16 07:57:14,626:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:57:15,355:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 198. configuration. Duration: 5.916148; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:57:15,728:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 163 training points for SMAC.\n",
"[INFO] [2016-08-16 07:57:32,097:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 16.3671 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:57:32,102:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 199. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:32,104:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 5\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.16521703672\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:32,638:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 199. configuration. Duration: 0.487921; loss: 0.676230; status 1; additional run info: ;duration: 0.48792099952697754;num_run:00199 \n",
"[INFO] [2016-08-16 07:57:32,645:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 200. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:32,647:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.74801246511\n",
" classifier:extra_trees:min_samples_leaf, Value: 5\n",
" classifier:extra_trees:min_samples_split, Value: 6\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 60.1843449141\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run20\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:32,699:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:32,753:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:32,834:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:32,847:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 200. configuration. Duration: 0.163840; loss: 0.807377; status 1; additional run info: ;duration: 0.16383981704711914;num_run:00200 \n",
"[INFO] [2016-08-16 07:57:32,853:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 201. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:32,856:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 3\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.132391803275\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:33,410:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 201. configuration. Duration: 0.510169; loss: 0.659836; status 1; additional run info: ;duration: 0.5101687908172607;num_run:00201 \n",
"[INFO] [2016-08-16 07:57:33,416:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 202. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:33,417:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.000214954870245\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: hinge\n",
" classifier:passive_aggressive:n_iter, Value: 363\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.291204146499\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 7\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 3\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 9\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 27\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:33,880:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 202. configuration. Duration: 0.431120; loss: 0.750000; status 1; additional run info: ;duration: 0.4311203956604004;num_run:00202 \n",
"[INFO] [2016-08-16 07:57:33,886:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 203. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:33,887:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 6\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.311045418818\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:57:34,415:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 203. configuration. Duration: 0.483259; loss: 0.651639; status 1; additional run info: ;duration: 0.4832592010498047;num_run:00203 \n",
"[INFO] [2016-08-16 07:57:34,421:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 204. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:34,422:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 29.038915074\n",
" classifier:bernoulli_nb:fit_prior, Value: True\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00140072631494\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:57:34,472:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 204. configuration. Duration: 0.018190; loss: 0.860656; status 1; additional run info: ;duration: 0.01819014549255371;num_run:00204 \n",
"[INFO] [2016-08-16 07:57:34,478:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 205. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:34,480:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 6\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:34,614:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.377049 11: 0.377049 12: 0.377049 13: 0.368852 14: 0.364754 15: 0.377049 16: 0.372951 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.368852 23: 0.372951 24: 0.377049 25: 0.377049 26: 0.377049 27: 0.377049 28: 0.377049 29: 0.377049 30: 0.377049 31: 0.377049 32: 0.377049 33: 0.377049 34: 0.377049 35: 0.372951 36: 0.377049 37: 0.377049 38: 0.377049 39: 0.377049 40: 0.381148 41: 0.381148 42: 0.381148 43: 0.381148 44: 0.381148 45: 0.381148 46: 0.381148 47: 0.381148 48: 0.381148 49: 0.381148\n",
"\tMembers: [12, 4, 4, 12, 4, 12, 36, 6, 11, 0, 9, 4, 4, 1, 3, 7, 4, 2, 4, 4, 4, 4, 1, 0, 7, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 3, 2, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4]\n",
"\tWeights: [ 0.04 0.06 0.06 0.08 0.58 0. 0.02 0.04 0. 0.02 0. 0.02\n",
" 0.06 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 13) (1, 21) (1, 37) (1, 39) (1, 40) (1, 65) (1, 67) (1, 69) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:34,619:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.381148\n",
"[INFO] [2016-08-16 07:57:34,621:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.925994 seconds\n",
"[INFO] [2016-08-16 07:57:34,623:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:57:35,007:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 205. configuration. Duration: 0.483058; loss: 0.651639; status 1; additional run info: ;duration: 0.48305845260620117;num_run:00205 \n",
"[INFO] [2016-08-16 07:57:35,013:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 206. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:35,015:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0124414542936\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:57:35,064:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 206. configuration. Duration: 0.019117; loss: 0.975410; status 1; additional run info: ;duration: 0.019117116928100586;num_run:00206 \n",
"[INFO] [2016-08-16 07:57:35,070:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 207. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:35,071:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 5\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000410463630695\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:57:35,596:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 207. configuration. Duration: 0.480430; loss: 0.676230; status 1; additional run info: ;duration: 0.48043012619018555;num_run:00207 \n",
"[INFO] [2016-08-16 07:57:35,603:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 208. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:35,604:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 0.716784929831\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 18\n",
" classifier:random_forest:min_samples_split, Value: 2\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 4\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 2\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 6\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 98\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:36,009:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 208. configuration. Duration: 0.363225; loss: 0.778689; status 1; additional run info: ;duration: 0.36322498321533203;num_run:00208 \n",
"[INFO] [2016-08-16 07:57:36,015:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 209. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:36,016:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:36,548:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 209. configuration. Duration: 0.487857; loss: 0.672131; status 1; additional run info: ;duration: 0.4878566265106201;num_run:00209 \n",
"[INFO] [2016-08-16 07:57:36,554:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 210. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:36,555:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 1.53828948588\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 12\n",
" classifier:decision_tree:min_samples_split, Value: 6\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.309344299591\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 112\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:36,596:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 210. configuration. Duration: 0.011760; loss: 0.680328; status 1; additional run info: ;duration: 0.011759519577026367;num_run:00210 \n",
"[INFO] [2016-08-16 07:57:36,602:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 211. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:36,604:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 7\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run20\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:36,638:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:36,692:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:36,773:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:37,126:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 211. configuration. Duration: 0.475494; loss: 0.668033; status 1; additional run info: ;duration: 0.475494384765625;num_run:00211 \n",
"[INFO] [2016-08-16 07:57:37,132:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 212. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:37,134:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: k_nearest_neighbors\n",
" classifier:k_nearest_neighbors:n_neighbors, Value: 4\n",
" classifier:k_nearest_neighbors:p, Value: 1\n",
" classifier:k_nearest_neighbors:weights, Value: uniform\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:gamma, Value: 0.0107801354251\n",
" preprocessor:kernel_pca:kernel, Value: rbf\n",
" preprocessor:kernel_pca:n_components, Value: 825\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:37,986:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 212. configuration. Duration: 0.808815; loss: 0.754098; status 1; additional run info: ;duration: 0.8088150024414062;num_run:00212 \n",
"[INFO] [2016-08-16 07:57:37,992:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 213. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:37,994:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 4\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:38,591:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 213. configuration. Duration: 0.549307; loss: 0.672131; status 1; additional run info: ;duration: 0.5493066310882568;num_run:00213 \n",
"[INFO] [2016-08-16 07:57:38,597:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 214. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:38,598:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 3.25542470077\n",
" classifier:extra_trees:min_samples_leaf, Value: 9\n",
" classifier:extra_trees:min_samples_split, Value: 7\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:57:38,896:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.364754 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.368852 20: 0.364754 21: 0.364754 22: 0.368852 23: 0.368852 24: 0.364754 25: 0.364754 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.364754 31: 0.364754 32: 0.364754 33: 0.364754 34: 0.364754 35: 0.364754 36: 0.364754 37: 0.364754 38: 0.364754 39: 0.368852 40: 0.368852 41: 0.368852 42: 0.368852 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.364754 47: 0.364754 48: 0.364754 49: 0.364754\n",
"\tMembers: [10, 3, 3, 10, 3, 10, 32, 4, 9, 30, 9, 30, 43, 3, 3, 3, 3, 3, 3, 32, 3, 5, 32, 3, 3, 4, 9, 3, 10, 3, 1, 3, 3, 3, 3, 3, 3, 3, 3, 25, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4]\n",
"\tWeights: [ 0. 0.02 0. 0.62 0.06 0.02 0. 0. 0. 0.06 0.08 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0.04 0. 0.06 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 37) (1, 40) (1, 65) (1, 67) (1, 73) (1, 79) (1, 115) (1, 143) (1, 149) (1, 183)\n",
"[INFO] [2016-08-16 07:57:38,902:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.364754\n",
"[INFO] [2016-08-16 07:57:38,903:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.269057 seconds\n",
"[INFO] [2016-08-16 07:57:38,906:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (202)!.\n",
"[INFO] [2016-08-16 07:57:38,907:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (202)!\n",
"[INFO] [2016-08-16 07:57:38,911:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 214. configuration. Duration: 0.273234; loss: 0.704918; status 1; additional run info: ;duration: 0.27323365211486816;num_run:00214 \n",
"[ERROR] [2016-08-16 07:57:38,916:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:57:38,917:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 215. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:38,919:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 6\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[ERROR] [2016-08-16 07:57:38,976:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:39,063:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:39,491:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 215. configuration. Duration: 0.523072; loss: 0.651639; status 1; additional run info: ;duration: 0.5230720043182373;num_run:00215 \n",
"[INFO] [2016-08-16 07:57:39,498:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 216. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:39,500:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 1933.67081447\n",
" classifier:libsvm_svc:gamma, Value: 0.00190197303202\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 0.00352610701893\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 8\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 1\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 4\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 12\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:39,717:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 216. configuration. Duration: 0.183862; loss: 0.786885; status 1; additional run info: ;duration: 0.18386220932006836;num_run:00216 \n",
"[INFO] [2016-08-16 07:57:39,724:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 217. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:39,725:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.091031215205\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 2\n",
" classifier:xgradient_boosting:min_child_weight, Value: 14\n",
" classifier:xgradient_boosting:n_estimators, Value: 403\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.629802024309\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00217626677543\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: liblinear_svc_preprocessor\n",
" preprocessor:liblinear_svc_preprocessor:C, Value: 0.192329114009\n",
" preprocessor:liblinear_svc_preprocessor:dual, Constant: False\n",
" preprocessor:liblinear_svc_preprocessor:fit_intercept, Constant: True\n",
" preprocessor:liblinear_svc_preprocessor:intercept_scaling, Constant: 1\n",
" preprocessor:liblinear_svc_preprocessor:loss, Value: squared_hinge\n",
" preprocessor:liblinear_svc_preprocessor:multi_class, Constant: ovr\n",
" preprocessor:liblinear_svc_preprocessor:penalty, Constant: l1\n",
" preprocessor:liblinear_svc_preprocessor:tol, Value: 0.0489053657338\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:40,122:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 217. configuration. Duration: 0.365366; loss: 0.704918; status 1; additional run info: ;duration: 0.36536645889282227;num_run:00217 \n",
"[INFO] [2016-08-16 07:57:40,129:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 218. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:40,132:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 0.0228037945327\n",
" classifier:sgd:average, Value: True\n",
" classifier:sgd:epsilon, Value: 0.000865315793256\n",
" classifier:sgd:eta0, Value: 0.00880683802423\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: invscaling\n",
" classifier:sgd:loss, Value: modified_huber\n",
" classifier:sgd:n_iter, Value: 846\n",
" classifier:sgd:penalty, Value: l2\n",
" classifier:sgd:power_t, Value: 0.426367179413\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: euclidean\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 185\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:40,892:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.372951 8: 0.368852 9: 0.372951 10: 0.364754 11: 0.372951 12: 0.364754 13: 0.368852 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.364754 18: 0.364754 19: 0.364754 20: 0.364754 21: 0.364754 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.368852 33: 0.368852 34: 0.364754 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.364754 40: 0.364754 41: 0.364754 42: 0.364754 43: 0.364754 44: 0.364754 45: 0.364754 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.368852\n",
"\tMembers: [10, 3, 3, 10, 3, 10, 31, 4, 9, 29, 9, 29, 3, 31, 3, 3, 3, 3, 3, 3, 3, 4, 31, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 4, 7, 3, 3, 3]\n",
"\tWeights: [ 0.04 0. 0.06 0.62 0.06 0. 0. 0.02 0. 0.04 0.06 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.04 0. 0.06 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 21) (1, 39) (1, 40) (1, 65) (1, 69) (1, 73) (1, 79) (1, 143) (1, 149)\n",
"[INFO] [2016-08-16 07:57:40,899:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.368852\n",
"[INFO] [2016-08-16 07:57:40,901:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.989160 seconds\n",
"[INFO] [2016-08-16 07:57:40,905:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (206)!.\n",
"[INFO] [2016-08-16 07:57:40,907:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (206)!\n",
"[ERROR] [2016-08-16 07:57:40,917:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:40,977:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:57:41,056:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 218. configuration. Duration: 0.893092; loss: 0.745902; status 1; additional run info: ;duration: 0.8930916786193848;num_run:00218 \n",
"[INFO] [2016-08-16 07:57:41,064:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 219. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:41,066:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.91631855297\n",
" classifier:extra_trees:min_samples_leaf, Value: 12\n",
" classifier:extra_trees:min_samples_split, Value: 9\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000137277851742\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.0707963646551\n",
" preprocessor:select_rates:mode, Value: fpr\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: normalize\n",
"[ERROR] [2016-08-16 07:57:41,067:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"\n",
"[INFO] [2016-08-16 07:57:41,299:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 219. configuration. Duration: 0.191408; loss: 0.717213; status 1; additional run info: ;duration: 0.1914076805114746;num_run:00219 \n",
"[INFO] [2016-08-16 07:57:41,306:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 220. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:41,307:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: proj_logit\n",
" classifier:proj_logit:max_epochs, Value: 8\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:57:41,383:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 220. configuration. Duration: 0.036238; loss: 0.741803; status 1; additional run info: ;duration: 0.03623771667480469;num_run:00220 \n",
"[INFO] [2016-08-16 07:57:41,391:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 221. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:41,393:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 3.31497888247\n",
" classifier:extra_trees:min_samples_leaf, Value: 19\n",
" classifier:extra_trees:min_samples_split, Value: 12\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000655564718535\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.362197626727\n",
" preprocessor:select_rates:mode, Value: fwe\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:57:41,620:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 221. configuration. Duration: 0.186989; loss: 0.725410; status 1; additional run info: ;duration: 0.18698930740356445;num_run:00221 \n",
"[INFO] [2016-08-16 07:57:41,627:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 222. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:41,628:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.0721445006771\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: squared_hinge\n",
" classifier:passive_aggressive:n_iter, Value: 260\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00587244728904\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:57:42,391:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 222. configuration. Duration: 0.731141; loss: 0.774590; status 1; additional run info: ;duration: 0.7311410903930664;num_run:00222 \n",
"[INFO] [2016-08-16 07:57:42,397:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 223. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:42,399:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: False\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 1.19707589764\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 4\n",
" classifier:random_forest:min_samples_split, Value: 18\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000230165604281\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 18\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:57:42,644:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 223. configuration. Duration: 0.204961; loss: 0.725410; status 1; additional run info: ;duration: 0.20496058464050293;num_run:00223 \n",
"[INFO] [2016-08-16 07:57:42,651:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 224. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:42,652:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0331040310821\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 5\n",
" classifier:xgradient_boosting:min_child_weight, Value: 11\n",
" classifier:xgradient_boosting:n_estimators, Value: 374\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.861562950466\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: pca\n",
" preprocessor:pca:keep_variance, Value: 0.635472587278\n",
" preprocessor:pca:whiten, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:57:42,933:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [9, 3, 3, 9, 3, 9, 30, 9, 3, 2, 9, 30, 3, 3, 3, 3, 30, 2, 9, 3, 3, 3, 1, 3, 3, 3, 2, 3, 3, 9, 3, 3, 3, 3, 3, 3, 8, 3, 30, 1, 1, 3, 3, 3, 3, 3, 3, 3, 2, 3]\n",
"\tWeights: [ 0. 0.06 0.08 0.62 0. 0. 0. 0. 0.02 0.14 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 37) (1, 39) (1, 40) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:42,938:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:57:42,940:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.026746 seconds\n",
"[INFO] [2016-08-16 07:57:42,942:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (209)!.\n",
"[INFO] [2016-08-16 07:57:42,944:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (209)!\n",
"[ERROR] [2016-08-16 07:57:42,954:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:43,013:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:43,103:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:43,246:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 224. configuration. Duration: 0.559515; loss: 0.750000; status 1; additional run info: ;duration: 0.5595149993896484;num_run:00224 \n",
"[INFO] [2016-08-16 07:57:43,252:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 225. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:43,254:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: False\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 3.69451504562\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 12\n",
" classifier:random_forest:min_samples_split, Value: 5\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 108, in _ica_par\n",
" - g_wtx[:, np.newaxis] * W)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 55, in _sym_decorrelation\n",
" s, u = linalg.eigh(np.dot(W, W.T))\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/linalg/decomp.py\", line 288, in eigh\n",
" a1 = _asarray_validated(a, check_finite=check_finite)\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/_lib/_util.py\", line 187, in _asarray_validated\n",
" a = toarray(a)\n",
" File \"/opt/conda/lib/python3.5/site-packages/numpy/lib/function_base.py\", line 668, in asarray_chkfinite\n",
" \"array must not contain infs or NaNs\")\n",
"ValueError: array must not contain infs or NaNs\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 36, in fit\n",
" raise ValueError(\"Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\")\n",
"ValueError: Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:57:44,370:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 225. configuration. Duration: 1.111333; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:57:44,378:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 226. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:57:44,379:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.148996234212\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 8\n",
" classifier:gradient_boosting:max_features, Value: 2.36220206009\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 14\n",
" classifier:gradient_boosting:min_samples_split, Value: 19\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 162\n",
" classifier:gradient_boosting:subsample, Value: 0.870347862583\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000283836810573\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:gamma, Value: 0.725350032904\n",
" preprocessor:kernel_pca:kernel, Value: rbf\n",
" preprocessor:kernel_pca:n_components, Value: 449\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:57:45,234:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [9, 3, 3, 9, 3, 9, 30, 9, 3, 2, 9, 30, 3, 3, 3, 3, 30, 2, 9, 3, 3, 3, 1, 3, 3, 3, 2, 3, 3, 9, 3, 3, 3, 3, 3, 3, 8, 3, 30, 1, 1, 3, 3, 3, 3, 3, 3, 3, 2, 3]\n",
"\tWeights: [ 0. 0.06 0.08 0.62 0. 0. 0. 0. 0.02 0.14 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 37) (1, 39) (1, 40) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:45,240:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:57:45,242:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.292228 seconds\n",
"[INFO] [2016-08-16 07:57:45,243:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run23\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:47,260:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:47,322:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:47,419:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:49,337:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [9, 3, 3, 9, 3, 9, 30, 9, 3, 2, 9, 30, 3, 3, 3, 3, 30, 2, 9, 3, 3, 3, 1, 3, 3, 3, 2, 3, 3, 9, 3, 3, 3, 3, 3, 3, 8, 3, 30, 1, 1, 3, 3, 3, 3, 3, 3, 3, 2, 3]\n",
"\tWeights: [ 0. 0.06 0.08 0.62 0. 0. 0. 0. 0.02 0.14 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 37) (1, 39) (1, 40) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:49,343:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:57:49,344:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.088955 seconds\n",
"[INFO] [2016-08-16 07:57:49,346:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:57:52,367:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 226. configuration. Duration: 7.831782; loss: 0.827869; status 1; additional run info: ;duration: 7.831782341003418;num_run:00226 \n",
"[INFO] [2016-08-16 07:57:52,737:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 187 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run23\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:57:53,368:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:57:53,423:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:57:53,512:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:57:55,421:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [9, 3, 3, 9, 3, 9, 30, 9, 3, 2, 9, 30, 3, 3, 3, 3, 30, 2, 9, 3, 3, 3, 1, 3, 3, 3, 2, 3, 3, 9, 3, 3, 3, 3, 3, 3, 8, 3, 30, 1, 1, 3, 3, 3, 3, 3, 3, 3, 2, 3]\n",
"\tWeights: [ 0. 0.06 0.08 0.62 0. 0. 0. 0. 0.02 0.14 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 37) (1, 39) (1, 40) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:57:55,427:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:57:55,428:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.064626 seconds\n",
"[INFO] [2016-08-16 07:57:55,430:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:07,003:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 14.2643 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:58:07,008:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 227. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:07,009:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:07,505:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 227. configuration. Duration: 0.453510; loss: 0.647541; status 1; additional run info: ;duration: 0.4535098075866699;num_run:00227 \n",
"[INFO] [2016-08-16 07:58:07,511:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 228. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:07,512:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 1.36569810232\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: hinge\n",
" classifier:passive_aggressive:n_iter, Value: 464\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0162880404214\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 1.22361961968\n",
" preprocessor:kitchen_sinks:n_components, Value: 127\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:08,410:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 228. configuration. Duration: 0.842628; loss: 1.016393; status 1; additional run info: ;duration: 0.8426275253295898;num_run:00228 \n",
"[INFO] [2016-08-16 07:58:08,416:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 229. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:08,417:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:08,943:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 229. configuration. Duration: 0.482374; loss: 0.647541; status 1; additional run info: ;duration: 0.4823744297027588;num_run:00229 \n",
"[INFO] [2016-08-16 07:58:08,949:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 230. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:08,950:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.253631551281\n",
" classifier:adaboost:max_depth, Value: 5\n",
" classifier:adaboost:n_estimators, Value: 68\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00724200852202\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 4.76648523265\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 2\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 6\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:09,282:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 230. configuration. Duration: 0.290014; loss: 0.709016; status 1; additional run info: ;duration: 0.29001426696777344;num_run:00230 \n",
"[INFO] [2016-08-16 07:58:09,287:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 231. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:09,288:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.01\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run23\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:09,486:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:09,539:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:09,626:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:09,674:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:09,853:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 231. configuration. Duration: 0.510329; loss: 0.647541; status 1; additional run info: ;duration: 0.5103294849395752;num_run:00231 \n",
"[INFO] [2016-08-16 07:58:09,860:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 232. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:09,861:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: proj_logit\n",
" classifier:proj_logit:max_epochs, Value: 14\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.134149183255\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:58:09,984:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 232. configuration. Duration: 0.078069; loss: 0.848361; status 1; additional run info: ;duration: 0.07806873321533203;num_run:00232 \n",
"[INFO] [2016-08-16 07:58:09,993:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 233. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:09,995:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:10,645:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 233. configuration. Duration: 0.599414; loss: 0.647541; status 1; additional run info: ;duration: 0.5994138717651367;num_run:00233 \n",
"[INFO] [2016-08-16 07:58:10,653:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 234. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:10,655:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 5.15456599113\n",
" classifier:libsvm_svc:coef0, Value: -0.444119623302\n",
" classifier:libsvm_svc:gamma, Value: 0.047531711626\n",
" classifier:libsvm_svc:kernel, Value: sigmoid\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 0.0027244726977\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 10\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 8\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 20\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 71\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:11,171:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 234. configuration. Duration: 0.476181; loss: 0.852459; status 1; additional run info: ;duration: 0.4761812686920166;num_run:00234 \n",
"[INFO] [2016-08-16 07:58:11,176:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 235. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:11,178:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00024395872174\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:11,771:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.372951 30: 0.372951 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.372951 36: 0.372951 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [9, 3, 3, 9, 3, 9, 29, 9, 3, 2, 9, 29, 3, 3, 3, 3, 29, 2, 9, 3, 3, 3, 1, 3, 3, 3, 2, 3, 3, 9, 3, 3, 3, 3, 3, 3, 8, 3, 29, 1, 1, 3, 3, 3, 3, 3, 3, 3, 2, 3]\n",
"\tWeights: [ 0. 0.06 0.08 0.62 0. 0. 0. 0. 0.02 0.14 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 37) (1, 39) (1, 40) (1, 73) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:58:11,777:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:11,778:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.295777 seconds\n",
"[INFO] [2016-08-16 07:58:11,780:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:11,878:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 235. configuration. Duration: 0.648618; loss: 0.647541; status 1; additional run info: ;duration: 0.6486175060272217;num_run:00235 \n",
"[INFO] [2016-08-16 07:58:11,884:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 236. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:11,886:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 1829.01065174\n",
" classifier:libsvm_svc:gamma, Value: 0.558090372693\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: True\n",
" classifier:libsvm_svc:tol, Value: 0.00577294880908\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000103490633032\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 637\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:13,500:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 236. configuration. Duration: 1.572980; loss: 0.868852; status 1; additional run info: ;duration: 1.5729801654815674;num_run:00236 \n",
"[INFO] [2016-08-16 07:58:13,507:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 237. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:13,508:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run23\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:13,795:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:13,860:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:13,955:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:14,002:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:14,131:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 237. configuration. Duration: 0.567623; loss: 0.647541; status 1; additional run info: ;duration: 0.5676233768463135;num_run:00237 \n",
"[INFO] [2016-08-16 07:58:14,139:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 238. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:14,140:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.128651484774\n",
" classifier:adaboost:max_depth, Value: 6\n",
" classifier:adaboost:n_estimators, Value: 384\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0350077160474\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 561\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:16,222:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.368852 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 27, 7, 2, 1, 7, 27, 2, 2, 2, 2, 27, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 20, 2, 2, 2, 1, 6, 7, 27, 2, 2, 2, 25, 2, 1, 2, 2, 7, 2, 2]\n",
"\tWeights: [ 0. 0.08 0.62 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.\n",
" 0.02 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 113) (1, 143) (1, 149)\n",
"[INFO] [2016-08-16 07:58:16,229:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:16,231:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.440774 seconds\n",
"[INFO] [2016-08-16 07:58:16,234:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (227)!.\n",
"[INFO] [2016-08-16 07:58:16,236:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (227)!\n",
"[ERROR] [2016-08-16 07:58:16,248:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:16,317:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:16,421:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:16,473:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:18,570:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.368852 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 26, 7, 2, 1, 7, 26, 2, 2, 2, 2, 26, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 20, 2, 2, 2, 1, 6, 7, 26, 2, 2, 2, 24, 2, 1, 2, 2, 7, 2, 2]\n",
"\tWeights: [ 0. 0.08 0.62 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 119) (1, 143) (1, 149)\n",
"[INFO] [2016-08-16 07:58:18,576:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:18,578:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.335383 seconds\n",
"[INFO] [2016-08-16 07:58:18,580:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:25,288:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 238. configuration. Duration: 11.046431; loss: 0.741803; status 1; additional run info: ;duration: 11.046431303024292;num_run:00238 \n",
"[INFO] [2016-08-16 07:58:25,698:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 195 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run24\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:26,620:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:26,679:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:26,768:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:26,812:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:28,640:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.368852 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 26, 7, 2, 1, 7, 26, 2, 2, 2, 2, 26, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 20, 2, 2, 2, 1, 6, 7, 26, 2, 2, 2, 24, 2, 1, 2, 2, 7, 2, 2]\n",
"\tWeights: [ 0. 0.08 0.62 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 119) (1, 143) (1, 149)\n",
"[INFO] [2016-08-16 07:58:28,646:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:28,647:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.031518 seconds\n",
"[INFO] [2016-08-16 07:58:28,649:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:39,293:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 13.5936 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:58:39,298:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 239. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:39,300:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 108, in _ica_par\n",
" - g_wtx[:, np.newaxis] * W)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 55, in _sym_decorrelation\n",
" s, u = linalg.eigh(np.dot(W, W.T))\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/linalg/decomp.py\", line 288, in eigh\n",
" a1 = _asarray_validated(a, check_finite=check_finite)\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/_lib/_util.py\", line 187, in _asarray_validated\n",
" a = toarray(a)\n",
" File \"/opt/conda/lib/python3.5/site-packages/numpy/lib/function_base.py\", line 668, in asarray_chkfinite\n",
" \"array must not contain infs or NaNs\")\n",
"ValueError: array must not contain infs or NaNs\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 36, in fit\n",
" raise ValueError(\"Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\")\n",
"ValueError: Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:40,391:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 239. configuration. Duration: 1.084260; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:58:40,399:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 240. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:40,401:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 163\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 0.0137495531096\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.20284845772\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 73.2842036082\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:58:40,448:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 240. configuration. Duration: 0.018744; loss: 0.872951; status 1; additional run info: ;duration: 0.018743515014648438;num_run:00240 \n",
"[INFO] [2016-08-16 07:58:40,453:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 241. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:40,455:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00216585486193\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 108, in _ica_par\n",
" - g_wtx[:, np.newaxis] * W)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 55, in _sym_decorrelation\n",
" s, u = linalg.eigh(np.dot(W, W.T))\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/linalg/decomp.py\", line 288, in eigh\n",
" a1 = _asarray_validated(a, check_finite=check_finite)\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/_lib/_util.py\", line 187, in _asarray_validated\n",
" a = toarray(a)\n",
" File \"/opt/conda/lib/python3.5/site-packages/numpy/lib/function_base.py\", line 668, in asarray_chkfinite\n",
" \"array must not contain infs or NaNs\")\n",
"ValueError: array must not contain infs or NaNs\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 36, in fit\n",
" raise ValueError(\"Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\")\n",
"ValueError: Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\n",
"You are already timing task: index_run24\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:40,709:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:40,763:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:40,843:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:40,884:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:41,529:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 241. configuration. Duration: 1.067196; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:58:41,535:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 242. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:41,537:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: k_nearest_neighbors\n",
" classifier:k_nearest_neighbors:n_neighbors, Value: 90\n",
" classifier:k_nearest_neighbors:p, Value: 1\n",
" classifier:k_nearest_neighbors:weights, Value: distance\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 6\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 12\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 20\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 68\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:58:41,765:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 242. configuration. Duration: 0.194426; loss: 0.778689; status 1; additional run info: ;duration: 0.1944260597229004;num_run:00242 \n",
"[INFO] [2016-08-16 07:58:41,771:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 243. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:41,773:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 108, in _ica_par\n",
" - g_wtx[:, np.newaxis] * W)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 55, in _sym_decorrelation\n",
" s, u = linalg.eigh(np.dot(W, W.T))\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/linalg/decomp.py\", line 288, in eigh\n",
" a1 = _asarray_validated(a, check_finite=check_finite)\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/_lib/_util.py\", line 187, in _asarray_validated\n",
" a = toarray(a)\n",
" File \"/opt/conda/lib/python3.5/site-packages/numpy/lib/function_base.py\", line 668, in asarray_chkfinite\n",
" \"array must not contain infs or NaNs\")\n",
"ValueError: array must not contain infs or NaNs\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 36, in fit\n",
" raise ValueError(\"Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\")\n",
"ValueError: Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:42,558:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.368852 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 26, 7, 2, 1, 7, 26, 2, 2, 2, 2, 26, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 20, 2, 2, 2, 1, 6, 7, 26, 2, 2, 2, 24, 2, 1, 2, 2, 7, 2, 2]\n",
"\tWeights: [ 0. 0.08 0.62 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 119) (1, 143) (1, 149)\n",
"[INFO] [2016-08-16 07:58:42,563:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:42,564:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.859799 seconds\n",
"[INFO] [2016-08-16 07:58:42,566:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:42,868:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 243. configuration. Duration: 1.088918; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:58:42,876:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 244. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:42,878:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: multinomial_nb\n",
" classifier:multinomial_nb:alpha, Value: 26.5580727027\n",
" classifier:multinomial_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0297968694291\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 35.0478342302\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:42,922:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 244. configuration. Duration: 0.014293; loss: 0.922131; status 1; additional run info: ;duration: 0.014292716979980469;num_run:00244 \n",
"[INFO] [2016-08-16 07:58:42,928:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 245. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:42,930:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00220659936221\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 108, in _ica_par\n",
" - g_wtx[:, np.newaxis] * W)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 55, in _sym_decorrelation\n",
" s, u = linalg.eigh(np.dot(W, W.T))\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/linalg/decomp.py\", line 288, in eigh\n",
" a1 = _asarray_validated(a, check_finite=check_finite)\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/_lib/_util.py\", line 187, in _asarray_validated\n",
" a = toarray(a)\n",
" File \"/opt/conda/lib/python3.5/site-packages/numpy/lib/function_base.py\", line 668, in asarray_chkfinite\n",
" \"array must not contain infs or NaNs\")\n",
"ValueError: array must not contain infs or NaNs\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 36, in fit\n",
" raise ValueError(\"Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\")\n",
"ValueError: Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:44,003:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 245. configuration. Duration: 1.067341; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:58:44,011:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 246. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:44,013:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 0.761609154976\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 16\n",
" classifier:decision_tree:min_samples_split, Value: 10\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:44,150:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 246. configuration. Duration: 0.108585; loss: 0.754098; status 1; additional run info: ;duration: 0.10858488082885742;num_run:00246 \n",
"[INFO] [2016-08-16 07:58:44,156:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 247. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:44,157:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Process pynisher function call:\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 33, in fit\n",
" self.preprocessor.fit(X)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 523, in fit\n",
" self._fit(X, compute_sources=False)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 479, in _fit\n",
" compute_sources=compute_sources, return_n_iter=True)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 335, in fastica\n",
" W, n_iter = _ica_par(X1, **kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 108, in _ica_par\n",
" - g_wtx[:, np.newaxis] * W)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/decomposition/fastica_.py\", line 55, in _sym_decorrelation\n",
" s, u = linalg.eigh(np.dot(W, W.T))\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/linalg/decomp.py\", line 288, in eigh\n",
" a1 = _asarray_validated(a, check_finite=check_finite)\n",
" File \"/opt/conda/lib/python3.5/site-packages/scipy/_lib/_util.py\", line 187, in _asarray_validated\n",
" a = toarray(a)\n",
" File \"/opt/conda/lib/python3.5/site-packages/numpy/lib/function_base.py\", line 668, in asarray_chkfinite\n",
" \"array must not contain infs or NaNs\")\n",
"ValueError: array must not contain infs or NaNs\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 249, in _bootstrap\n",
" self.run()\n",
" File \"/opt/conda/lib/python3.5/multiprocessing/process.py\", line 93, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"/opt/conda/lib/python3.5/site-packages/pynisher/limit_function_call.py\", line 83, in subprocess_func\n",
" return_value = ((func(*args, **kwargs), 0))\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 148, in eval_holdout\n",
" loss, opt_pred, valid_pred, test_pred = evaluator.fit_predict_and_loss()\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/evaluation/holdout_evaluator.py\", line 59, in fit_predict_and_loss\n",
" self.model.fit(X_train, Y_train)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 62, in fit\n",
" init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/classification.py\", line 87, in pre_transform\n",
" X, y, fit_params=fit_params, init_params=init_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/base.py\", line 131, in pre_transform\n",
" X, fit_params = self.pipeline_._pre_transform(X, y, **fit_params)\n",
" File \"/opt/conda/lib/python3.5/site-packages/sklearn/pipeline.py\", line 147, in _pre_transform\n",
" Xt = transform.fit(Xt, y, **fit_params_steps[name]) \\\n",
" File \"/opt/conda/lib/python3.5/site-packages/autosklearn/pipeline/components/feature_preprocessing/fast_ica.py\", line 36, in fit\n",
" raise ValueError(\"Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\")\n",
"ValueError: Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738\n",
"You are already timing task: index_run24\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:44,587:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:44,635:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:44,709:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:44,745:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:45,224:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 247. configuration. Duration: 1.061876; loss: 2.000000; status 3; additional run info: \n",
"[INFO] [2016-08-16 07:58:45,231:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 248. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:45,232:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 7.05765591146\n",
" classifier:libsvm_svc:gamma, Value: 0.00998941511712\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 8.4086885902e-05\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0276695772285\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 20.0121367493\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:45,314:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 248. configuration. Duration: 0.052881; loss: 0.872951; status 1; additional run info: ;duration: 0.05288124084472656;num_run:00248 \n",
"[INFO] [2016-08-16 07:58:45,321:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 249. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:45,322:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0557671619111\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:45,865:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 249. configuration. Duration: 0.500574; loss: 0.647541; status 1; additional run info: ;duration: 0.5005738735198975;num_run:00249 \n",
"[INFO] [2016-08-16 07:58:45,871:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 250. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:45,873:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: True\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 2.96740124817\n",
" classifier:extra_trees:min_samples_leaf, Value: 14\n",
" classifier:extra_trees:min_samples_split, Value: 14\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.00168368290437\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: cosine\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 367\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:58:46,074:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 250. configuration. Duration: 0.165070; loss: 0.717213; status 1; additional run info: ;duration: 0.1650698184967041;num_run:00250 \n",
"[INFO] [2016-08-16 07:58:46,081:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 251. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:46,083:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00140780731379\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:46,423:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.372951 33: 0.372951 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.377049 39: 0.372951 40: 0.372951 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.368852 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 26, 7, 2, 1, 7, 26, 2, 2, 2, 2, 26, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 20, 2, 2, 2, 1, 6, 7, 26, 2, 2, 2, 24, 2, 1, 2, 2, 7, 2, 2]\n",
"\tWeights: [ 0. 0.08 0.62 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0. 0. 0.02\n",
" 0. 0.08 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 119) (1, 143) (1, 149)\n",
"[INFO] [2016-08-16 07:58:46,428:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:46,429:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 1.847049 seconds\n",
"[INFO] [2016-08-16 07:58:46,431:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:46,617:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 251. configuration. Duration: 0.491348; loss: 0.647541; status 1; additional run info: ;duration: 0.4913480281829834;num_run:00251 \n",
"[INFO] [2016-08-16 07:58:46,623:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 252. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:46,625:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 2.19779026094\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: hinge\n",
" classifier:passive_aggressive:n_iter, Value: 166\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0417120368741\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.478991322012\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: chi2\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:58:46,780:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 252. configuration. Duration: 0.128166; loss: 0.864754; status 1; additional run info: ;duration: 0.12816572189331055;num_run:00252 \n",
"[INFO] [2016-08-16 07:58:46,786:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 253. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:46,787:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0322275492407\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:47,282:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 253. configuration. Duration: 0.450714; loss: 0.647541; status 1; additional run info: ;duration: 0.4507136344909668;num_run:00253 \n",
"[INFO] [2016-08-16 07:58:47,289:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 254. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:47,291:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 147\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 3.09181477593e-05\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:coef0, Value: -0.948574597272\n",
" preprocessor:kernel_pca:degree, Value: 5\n",
" preprocessor:kernel_pca:gamma, Value: 3.20669498205e-05\n",
" preprocessor:kernel_pca:kernel, Value: poly\n",
" preprocessor:kernel_pca:n_components, Value: 485\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:58:48,122:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 254. configuration. Duration: 0.762486; loss: 0.868852; status 1; additional run info: ;duration: 0.762486457824707;num_run:00254 \n",
"[INFO] [2016-08-16 07:58:48,130:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 255. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:48,131:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: False\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 4.79442595134\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 1\n",
" classifier:random_forest:min_samples_split, Value: 20\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:n_components, Value: 1051\n",
" preprocessor:fast_ica:whiten, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"n_components is too large: it will be set to 9\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run24\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:48,445:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:48,505:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:48,583:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:48,638:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:49,266:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 255. configuration. Duration: 1.096249; loss: 0.758197; status 1; additional run info: ;duration: 1.0962486267089844;num_run:00255 \n",
"[INFO] [2016-08-16 07:58:49,274:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 256. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:49,277:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.0826346409249\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: hinge\n",
" classifier:passive_aggressive:n_iter, Value: 88\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.000159162512414\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 3.88711161466\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 9\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 16\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:49,561:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 256. configuration. Duration: 0.244519; loss: 0.786885; status 1; additional run info: ;duration: 0.24451923370361328;num_run:00256 \n",
"[INFO] [2016-08-16 07:58:49,568:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 257. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:49,570:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.524223272018\n",
" classifier:adaboost:max_depth, Value: 2\n",
" classifier:adaboost:n_estimators, Value: 150\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00042196261322\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_rates\n",
" preprocessor:select_rates:alpha, Value: 0.259669948396\n",
" preprocessor:select_rates:mode, Value: fdr\n",
" preprocessor:select_rates:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:58:49,858:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 257. configuration. Duration: 0.240685; loss: 0.713115; status 1; additional run info: ;duration: 0.24068474769592285;num_run:00257 \n",
"[INFO] [2016-08-16 07:58:49,866:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 258. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:49,867:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.000118686674731\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: squared_hinge\n",
" classifier:passive_aggressive:n_iter, Value: 5\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.934743328868\n",
" preprocessor:kitchen_sinks:n_components, Value: 89\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:58:49,985:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 258. configuration. Duration: 0.050747; loss: 0.860656; status 1; additional run info: ;duration: 0.05074667930603027;num_run:00258 \n",
"[INFO] [2016-08-16 07:58:49,993:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 259. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:49,995:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 4.35195219912\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 10\n",
" classifier:random_forest:min_samples_split, Value: 13\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:58:50,404:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 259. configuration. Duration: 0.369153; loss: 0.750000; status 1; additional run info: ;duration: 0.3691532611846924;num_run:00259 \n",
"[INFO] [2016-08-16 07:58:50,412:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 260. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:50,414:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 0.0841053273233\n",
" classifier:libsvm_svc:coef0, Value: -0.271867456316\n",
" classifier:libsvm_svc:degree, Value: 4\n",
" classifier:libsvm_svc:gamma, Value: 4.28871114284e-05\n",
" classifier:libsvm_svc:kernel, Value: poly\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 2.17182091601e-05\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0104071405431\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 95.525679335\n",
" preprocessor:select_percentile_classification:score_func, Value: f_classif\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:50,486:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 260. configuration. Duration: 0.040026; loss: 0.860656; status 1; additional run info: ;duration: 0.040025949478149414;num_run:00260 \n",
"[INFO] [2016-08-16 07:58:50,492:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 261. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:50,494:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.113956269486\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 4\n",
" classifier:xgradient_boosting:min_child_weight, Value: 18\n",
" classifier:xgradient_boosting:n_estimators, Value: 307\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.667919040627\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00020133188394\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:58:50,609:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.364754 33: 0.364754 34: 0.368852 35: 0.364754 36: 0.364754 37: 0.368852 38: 0.368852 39: 0.368852 40: 0.368852 41: 0.368852 42: 0.372951 43: 0.372951 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.372951 48: 0.368852 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 25, 7, 2, 1, 7, 25, 2, 2, 2, 2, 25, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 39, 2, 2, 25, 2, 6, 2, 2, 7, 2, 2, 2, 23, 25, 1, 7]\n",
"\tWeights: [ 0. 0.08 0.6 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.1\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 143) (1, 149) (1, 211)\n",
"[INFO] [2016-08-16 07:58:50,615:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:50,617:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.176121 seconds\n",
"[INFO] [2016-08-16 07:58:50,619:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (240)!.\n",
"[INFO] [2016-08-16 07:58:50,621:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (240)!\n",
"[ERROR] [2016-08-16 07:58:50,632:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:50,694:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:50,787:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:50,833:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:51,067:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 261. configuration. Duration: 0.539521; loss: 0.704918; status 1; additional run info: ;duration: 0.5395214557647705;num_run:00261 \n",
"[INFO] [2016-08-16 07:58:51,074:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 262. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:51,076:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 20234.6731671\n",
" classifier:libsvm_svc:gamma, Value: 0.103789295331\n",
" classifier:libsvm_svc:kernel, Value: rbf\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: True\n",
" classifier:libsvm_svc:tol, Value: 0.00198461388059\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:51,257:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 262. configuration. Duration: 0.149132; loss: 0.786885; status 1; additional run info: ;duration: 0.14913201332092285;num_run:00262 \n",
"[INFO] [2016-08-16 07:58:51,265:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 263. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:51,267:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.793020482109\n",
" classifier:adaboost:max_depth, Value: 4\n",
" classifier:adaboost:n_estimators, Value: 254\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000441147213686\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 2.02065003018\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 1\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 4\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:52,363:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 263. configuration. Duration: 1.030640; loss: 0.782787; status 1; additional run info: ;duration: 1.030639886856079;num_run:00263 \n",
"[INFO] [2016-08-16 07:58:52,371:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 264. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:52,373:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: liblinear_svc\n",
" classifier:liblinear_svc:C, Value: 2020.21054965\n",
" classifier:liblinear_svc:dual, Constant: False\n",
" classifier:liblinear_svc:fit_intercept, Constant: True\n",
" classifier:liblinear_svc:intercept_scaling, Constant: 1\n",
" classifier:liblinear_svc:loss, Value: squared_hinge\n",
" classifier:liblinear_svc:multi_class, Constant: ovr\n",
" classifier:liblinear_svc:penalty, Value: l2\n",
" classifier:liblinear_svc:tol, Value: 0.000212559610957\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:n_components, Value: 544\n",
" preprocessor:fast_ica:whiten, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"n_components is too large: it will be set to 9\n",
"[INFO] [2016-08-16 07:58:52,516:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 264. configuration. Duration: 0.109359; loss: 0.721311; status 1; additional run info: ;duration: 0.1093590259552002;num_run:00264 \n",
"[INFO] [2016-08-16 07:58:52,524:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 265. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:52,525:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.167602737112\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 5\n",
" classifier:xgradient_boosting:min_child_weight, Value: 7\n",
" classifier:xgradient_boosting:n_estimators, Value: 137\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.156516674066\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: False\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 0.74438794527\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 14\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 20\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:58:52,808:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.364754 33: 0.364754 34: 0.368852 35: 0.364754 36: 0.364754 37: 0.368852 38: 0.368852 39: 0.368852 40: 0.368852 41: 0.368852 42: 0.372951 43: 0.372951 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.372951 48: 0.368852 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 25, 7, 2, 1, 7, 25, 2, 2, 2, 2, 25, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 39, 2, 2, 25, 2, 6, 2, 2, 7, 2, 2, 2, 23, 25, 1, 7]\n",
"\tWeights: [ 0. 0.08 0.6 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.1\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 143) (1, 149) (1, 211)"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"[INFO] [2016-08-16 07:58:52,815:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:52,817:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.189212 seconds\n",
"[INFO] [2016-08-16 07:58:52,819:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:58:52,863:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 265. configuration. Duration: 0.295356; loss: 0.700820; status 1; additional run info: ;duration: 0.29535555839538574;num_run:00265 \n",
"[INFO] [2016-08-16 07:58:52,871:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 266. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:58:52,873:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 2.0853229912e-06\n",
" classifier:sgd:average, Value: True\n",
" classifier:sgd:eta0, Value: 0.0938453015205\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: constant\n",
" classifier:sgd:loss, Value: hinge\n",
" classifier:sgd:n_iter, Value: 49\n",
" classifier:sgd:penalty, Value: l2\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 1.33646604904\n",
" preprocessor:kitchen_sinks:n_components, Value: 654\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:58:53,394:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 266. configuration. Duration: 0.448127; loss: 0.745902; status 1; additional run info: ;duration: 0.44812655448913574;num_run:00266 \n",
"[INFO] [2016-08-16 07:58:53,890:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 218 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run25\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:58:54,836:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:58:54,900:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:58:54,999:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:58:55,047:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:58:57,050:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.372951 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.364754 16: 0.368852 17: 0.368852 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.364754 33: 0.364754 34: 0.368852 35: 0.364754 36: 0.364754 37: 0.368852 38: 0.368852 39: 0.368852 40: 0.368852 41: 0.368852 42: 0.372951 43: 0.372951 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.372951 48: 0.368852 49: 0.372951\n",
"\tMembers: [7, 2, 2, 7, 2, 7, 25, 7, 2, 1, 7, 25, 2, 2, 2, 2, 25, 1, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 39, 2, 2, 25, 2, 6, 2, 2, 7, 2, 2, 2, 23, 25, 1, 7]\n",
"\tWeights: [ 0. 0.08 0.6 0. 0. 0. 0.02 0.16 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.1\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 39) (1, 40) (1, 73) (1, 79) (1, 143) (1, 149) (1, 211)\n",
"[INFO] [2016-08-16 07:58:57,056:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:58:57,058:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.226606 seconds\n",
"[INFO] [2016-08-16 07:58:57,060:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:59:10,854:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 16.9621 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:59:10,860:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 267. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:10,862:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:11,499:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 267. configuration. Duration: 0.587581; loss: 0.647541; status 1; additional run info: ;duration: 0.5875813961029053;num_run:00267 \n",
"[INFO] [2016-08-16 07:59:11,506:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 268. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:11,508:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: gini\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 0.590776897477\n",
" classifier:extra_trees:min_samples_leaf, Value: 2\n",
" classifier:extra_trees:min_samples_split, Value: 3\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.102555970304\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:59:11,809:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 268. configuration. Duration: 0.254910; loss: 0.721311; status 1; additional run info: ;duration: 0.2549104690551758;num_run:00268 \n",
"[INFO] [2016-08-16 07:59:11,816:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 269. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:11,818:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00178585503748\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:12,486:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 269. configuration. Duration: 0.615896; loss: 0.647541; status 1; additional run info: ;duration: 0.6158957481384277;num_run:00269 \n",
"[INFO] [2016-08-16 07:59:12,493:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 270. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:12,495:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: bernoulli_nb\n",
" classifier:bernoulli_nb:alpha, Value: 0.600861419593\n",
" classifier:bernoulli_nb:fit_prior, Value: False\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: nystroem_sampler\n",
" preprocessor:nystroem_sampler:coef0, Value: 0.228849660174\n",
" preprocessor:nystroem_sampler:degree, Value: 3\n",
" preprocessor:nystroem_sampler:gamma, Value: 1.1730226195\n",
" preprocessor:nystroem_sampler:kernel, Value: poly\n",
" preprocessor:nystroem_sampler:n_components, Value: 842\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/kernel_approximation.py:463: UserWarning: n_components > n_samples. This is not possible.\n",
"n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n",
" warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n",
"You are already timing task: index_run25\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:59:13,146:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[INFO] [2016-08-16 07:59:13,207:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 270. configuration. Duration: 0.614878; loss: 0.856557; status 1; additional run info: ;duration: 0.6148781776428223;num_run:00270 \n",
"[INFO] [2016-08-16 07:59:13,215:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 271. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:13,217:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[ERROR] [2016-08-16 07:59:13,248:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:13,351:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:13,402:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:59:13,887:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 271. configuration. Duration: 0.617685; loss: 0.647541; status 1; additional run info: ;duration: 0.6176848411560059;num_run:00271 \n",
"[INFO] [2016-08-16 07:59:13,896:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 272. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:13,897:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: libsvm_svc\n",
" classifier:libsvm_svc:C, Value: 731.150713256\n",
" classifier:libsvm_svc:coef0, Value: 0.467738961408\n",
" classifier:libsvm_svc:gamma, Value: 0.00161197537947\n",
" classifier:libsvm_svc:kernel, Value: sigmoid\n",
" classifier:libsvm_svc:max_iter, Constant: -1\n",
" classifier:libsvm_svc:shrinking, Value: False\n",
" classifier:libsvm_svc:tol, Value: 0.0159182615907\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 68.0766027974\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/svm/base.py:547: ChangedBehaviorWarning: The decision_function_shape default value will change from 'ovo' to 'ovr' in 0.18. This will change the shape of the decision function returned by SVC.\n",
" \"SVC.\", ChangedBehaviorWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:14,005:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 272. configuration. Duration: 0.071030; loss: 0.860656; status 1; additional run info: ;duration: 0.0710303783416748;num_run:00272 \n",
"[INFO] [2016-08-16 07:59:14,013:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 273. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:14,016:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:14,711:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 273. configuration. Duration: 0.637126; loss: 0.647541; status 1; additional run info: ;duration: 0.6371257305145264;num_run:00273 \n",
"[INFO] [2016-08-16 07:59:14,719:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 274. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:14,721:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: sgd\n",
" classifier:sgd:alpha, Value: 4.01516225312e-06\n",
" classifier:sgd:average, Value: False\n",
" classifier:sgd:eta0, Value: 0.045490815558\n",
" classifier:sgd:fit_intercept, Constant: True\n",
" classifier:sgd:learning_rate, Value: constant\n",
" classifier:sgd:loss, Value: perceptron\n",
" classifier:sgd:n_iter, Value: 7\n",
" classifier:sgd:penalty, Value: l1\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: pca\n",
" preprocessor:pca:keep_variance, Value: 0.947414482652\n",
" preprocessor:pca:whiten, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:59:14,796:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 274. configuration. Duration: 0.033135; loss: 0.901639; status 1; additional run info: ;duration: 0.033135175704956055;num_run:00274 \n",
"[INFO] [2016-08-16 07:59:14,803:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 275. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:14,805:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:15,578:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 275. configuration. Duration: 0.711133; loss: 0.647541; status 1; additional run info: ;duration: 0.7111327648162842;num_run:00275 \n",
"[INFO] [2016-08-16 07:59:15,587:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 276. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:15,592:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 3.76314683551\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 2\n",
" classifier:random_forest:min_samples_split, Value: 7\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:kernel, Value: cosine\n",
" preprocessor:kernel_pca:n_components, Value: 26\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:15,725:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.368852 11: 0.368852 12: 0.372951 13: 0.372951 14: 0.372951 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.368852 20: 0.368852 21: 0.368852 22: 0.368852 23: 0.368852 24: 0.368852 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.368852 33: 0.368852 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.368852 40: 0.368852 41: 0.368852 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.368852\n",
"\tMembers: [5, 0, 0, 5, 0, 5, 23, 5, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 3, 0, 37, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 0, 0, 5, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 5]\n",
"\tWeights: [ 0.66 0.04 0. 0.12 0. 0.14 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 40) (1, 67) (1, 71) (1, 79) (1, 149) (1, 211)\n",
"[INFO] [2016-08-16 07:59:15,759:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.368852\n",
"[INFO] [2016-08-16 07:59:15,766:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.629172 seconds\n",
"[INFO] [2016-08-16 07:59:15,775:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (255)!.\n",
"[INFO] [2016-08-16 07:59:15,782:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (255)!\n",
"[ERROR] [2016-08-16 07:59:15,808:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:15,925:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:16,068:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:16,131:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:59:16,984:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 276. configuration. Duration: 1.260863; loss: 0.737705; status 1; additional run info: ;duration: 1.2608625888824463;num_run:00276 \n",
"[INFO] [2016-08-16 07:59:16,993:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 277. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:16,995:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.012740490995\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:17,778:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 277. configuration. Duration: 0.718196; loss: 0.647541; status 1; additional run info: ;duration: 0.718195915222168;num_run:00277 \n",
"[INFO] [2016-08-16 07:59:17,788:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 278. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:17,790:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: extra_trees\n",
" classifier:extra_trees:bootstrap, Value: False\n",
" classifier:extra_trees:criterion, Value: entropy\n",
" classifier:extra_trees:max_depth, Constant: None\n",
" classifier:extra_trees:max_features, Value: 4.34171265251\n",
" classifier:extra_trees:min_samples_leaf, Value: 11\n",
" classifier:extra_trees:min_samples_split, Value: 14\n",
" classifier:extra_trees:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:extra_trees:n_estimators, Constant: 100\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0022976889269\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 2.98063057031\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 16\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 3\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:18,308:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 278. configuration. Duration: 0.457024; loss: 0.684426; status 1; additional run info: ;duration: 0.45702409744262695;num_run:00278 \n",
"[INFO] [2016-08-16 07:59:18,318:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 279. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:18,319:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:18,684:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.368852 11: 0.368852 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.372951 20: 0.368852 21: 0.368852 22: 0.372951 23: 0.368852 24: 0.368852 25: 0.372951 26: 0.368852 27: 0.368852 28: 0.372951 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.368852 33: 0.368852 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.372951 40: 0.368852 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.372951 45: 0.372951 46: 0.368852 47: 0.368852 48: 0.372951 49: 0.368852\n",
"\tMembers: [3, 0, 0, 3, 0, 3, 20, 3, 0, 0, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 3, 0, 0, 3, 20, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0]\n",
"\tWeights: [ 0.62 0. 0. 0.34 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.04 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 40) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:59:18,693:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.368852\n",
"[INFO] [2016-08-16 07:59:18,696:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.897388 seconds\n",
"[INFO] [2016-08-16 07:59:18,700:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (261)!.\n",
"[INFO] [2016-08-16 07:59:18,703:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (261)!\n",
"[ERROR] [2016-08-16 07:59:18,718:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:18,799:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:18,922:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:18,982:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[INFO] [2016-08-16 07:59:19,102:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 279. configuration. Duration: 0.718289; loss: 0.647541; status 1; additional run info: ;duration: 0.7182888984680176;num_run:00279 \n",
"[INFO] [2016-08-16 07:59:19,111:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 280. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:19,114:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 1.13200263385\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 10\n",
" classifier:decision_tree:min_samples_split, Value: 11\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: gini\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 0.768794206873\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 11\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 10\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:19,403:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 280. configuration. Duration: 0.236581; loss: 0.659836; status 1; additional run info: ;duration: 0.23658084869384766;num_run:00280 \n",
"[INFO] [2016-08-16 07:59:19,413:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 281. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:19,416:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:20,198:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 281. configuration. Duration: 0.718480; loss: 0.647541; status 1; additional run info: ;duration: 0.7184803485870361;num_run:00281 \n",
"[INFO] [2016-08-16 07:59:20,208:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 282. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:20,210:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: gini\n",
" classifier:decision_tree:max_depth, Value: 0.0977208800612\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 1\n",
" classifier:decision_tree:min_samples_split, Value: 10\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:minimum_fraction, Value: 0.0464557352021\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: extra_trees_preproc_for_classification\n",
" preprocessor:extra_trees_preproc_for_classification:bootstrap, Value: True\n",
" preprocessor:extra_trees_preproc_for_classification:criterion, Value: entropy\n",
" preprocessor:extra_trees_preproc_for_classification:max_depth, Constant: None\n",
" preprocessor:extra_trees_preproc_for_classification:max_features, Value: 4.24066599335\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_leaf, Value: 9\n",
" preprocessor:extra_trees_preproc_for_classification:min_samples_split, Value: 5\n",
" preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf, Constant: 0.0\n",
" preprocessor:extra_trees_preproc_for_classification:n_estimators, Constant: 100\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n",
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/__init__.py:93: DeprecationWarning: Function transform is deprecated; Support to use estimators as feature selectors will be removed in version 0.19. Use SelectFromModel instead.\n",
" warnings.warn(msg, category=DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:20,516:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 282. configuration. Duration: 0.253466; loss: 1.032787; status 1; additional run info: ;duration: 0.2534658908843994;num_run:00282 \n",
"[INFO] [2016-08-16 07:59:20,526:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 283. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:20,529:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:21,305:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 283. configuration. Duration: 0.712936; loss: 0.647541; status 1; additional run info: ;duration: 0.7129359245300293;num_run:00283 \n",
"[INFO] [2016-08-16 07:59:21,314:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 284. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:21,316:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 138\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 0.0221718766779\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0148630536496\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: deflation\n",
" preprocessor:fast_ica:fun, Value: logcosh\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:59:21,517:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.368852 2: 0.364754 3: 0.368852 4: 0.368852 5: 0.368852 6: 0.372951 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.368852 11: 0.368852 12: 0.368852 13: 0.368852 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.372951 20: 0.368852 21: 0.368852 22: 0.372951 23: 0.368852 24: 0.368852 25: 0.372951 26: 0.368852 27: 0.368852 28: 0.372951 29: 0.368852 30: 0.368852 31: 0.372951 32: 0.368852 33: 0.368852 34: 0.372951 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.372951 40: 0.368852 41: 0.372951 42: 0.372951 43: 0.368852 44: 0.372951 45: 0.372951 46: 0.368852 47: 0.368852 48: 0.372951 49: 0.368852\n",
"\tMembers: [3, 0, 0, 3, 0, 3, 20, 3, 0, 0, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 0, 3, 0, 3, 0, 0, 3, 20, 0, 3, 0, 0, 3, 0, 0, 0, 3, 0]\n",
"\tWeights: [ 0.62 0. 0. 0.34 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.04 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 40) (1, 79) (1, 149)\n",
"[INFO] [2016-08-16 07:59:21,525:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.368852\n",
"[INFO] [2016-08-16 07:59:21,528:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.815318 seconds\n",
"[INFO] [2016-08-16 07:59:21,530:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:59:22,783:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 284. configuration. Duration: 1.426649; loss: 0.725410; status 1; additional run info: ;duration: 1.4266493320465088;num_run:00284 \n",
"[INFO] [2016-08-16 07:59:22,792:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 285. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:22,795:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:23,531:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 285. configuration. Duration: 0.677448; loss: 0.647541; status 1; additional run info: ;duration: 0.6774475574493408;num_run:00285 \n",
"[INFO] [2016-08-16 07:59:23,540:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 286. configuration (from SMAC) with time limit 360s.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run27\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:23,542:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: passive_aggressive\n",
" classifier:passive_aggressive:C, Value: 0.771761127786\n",
" classifier:passive_aggressive:fit_intercept, Constant: True\n",
" classifier:passive_aggressive:loss, Value: squared_hinge\n",
" classifier:passive_aggressive:n_iter, Value: 891\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[ERROR] [2016-08-16 07:59:23,549:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:23,624:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:23,736:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:23,792:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:23,848:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:24,506:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 286. configuration. Duration: 0.927629; loss: 0.774590; status 1; additional run info: ;duration: 0.9276289939880371;num_run:00286 \n",
"[INFO] [2016-08-16 07:59:24,515:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 287. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:24,516:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.202887549518\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 2\n",
" classifier:xgradient_boosting:min_child_weight, Value: 18\n",
" classifier:xgradient_boosting:n_estimators, Value: 274\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.184788347556\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:24,773:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 287. configuration. Duration: 0.218101; loss: 0.684426; status 1; additional run info: ;duration: 0.21810078620910645;num_run:00287 \n",
"[INFO] [2016-08-16 07:59:24,783:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 288. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:24,784:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gaussian_nb\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00055320016553\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:59:24,857:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 288. configuration. Duration: 0.035266; loss: 0.885246; status 1; additional run info: ;duration: 0.03526639938354492;num_run:00288 \n",
"[INFO] [2016-08-16 07:59:24,865:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 289. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:24,866:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 4.09855757803\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 5\n",
" classifier:random_forest:min_samples_split, Value: 2\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: manhattan\n",
" preprocessor:feature_agglomeration:linkage, Value: average\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 168\n",
" preprocessor:feature_agglomeration:pooling_func, Value: median\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:59:25,172:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 289. configuration. Duration: 0.258311; loss: 0.680328; status 1; additional run info: ;duration: 0.25831103324890137;num_run:00289 \n",
"[INFO] [2016-08-16 07:59:25,182:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 290. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:25,183:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: False\n",
" classifier:random_forest:criterion, Value: gini\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 2.27904317663\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 19\n",
" classifier:random_forest:min_samples_split, Value: 10\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:25,573:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 290. configuration. Duration: 0.341999; loss: 0.663934; status 1; additional run info: ;duration: 0.3419992923736572;num_run:00290 \n",
"[INFO] [2016-08-16 07:59:25,582:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 291. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:25,585:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: liblinear_svc\n",
" classifier:liblinear_svc:C, Value: 0.0410131273069\n",
" classifier:liblinear_svc:dual, Constant: False\n",
" classifier:liblinear_svc:fit_intercept, Constant: True\n",
" classifier:liblinear_svc:intercept_scaling, Constant: 1\n",
" classifier:liblinear_svc:loss, Value: squared_hinge\n",
" classifier:liblinear_svc:multi_class, Constant: ovr\n",
" classifier:liblinear_svc:penalty, Value: l2\n",
" classifier:liblinear_svc:tol, Value: 0.0515621199664\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: min/max\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:25,646:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 291. configuration. Duration: 0.023308; loss: 0.815574; status 1; additional run info: ;duration: 0.02330756187438965;num_run:00291 \n",
"[INFO] [2016-08-16 07:59:25,654:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 292. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:25,655:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0425186750571\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 5\n",
" classifier:xgradient_boosting:min_child_weight, Value: 20\n",
" classifier:xgradient_boosting:n_estimators, Value: 104\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.412491208946\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.0278282782885\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kernel_pca\n",
" preprocessor:kernel_pca:coef0, Value: -0.0333107781191\n",
" preprocessor:kernel_pca:degree, Value: 3\n",
" preprocessor:kernel_pca:gamma, Value: 3.85533308175\n",
" preprocessor:kernel_pca:kernel, Value: poly\n",
" preprocessor:kernel_pca:n_components, Value: 850\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:26,272:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.356557 2: 0.372951 3: 0.360656 4: 0.360656 5: 0.372951 6: 0.364754 7: 0.364754 8: 0.372951 9: 0.364754 10: 0.368852 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.372951 15: 0.368852 16: 0.368852 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.377049 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.368852 32: 0.368852 33: 0.372951 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.368852 39: 0.372951 40: 0.372951 41: 0.368852 42: 0.372951 43: 0.372951 44: 0.368852 45: 0.368852 46: 0.372951 47: 0.368852 48: 0.372951 49: 0.372951\n",
"\tMembers: [1, 46, 0, 1, 46, 0, 1, 46, 0, 0, 46, 0, 0, 46, 1, 0, 46, 1, 1, 46, 0, 0, 46, 0, 0, 46, 1, 0, 46, 0, 0, 0, 46, 1, 0, 0, 46, 16, 46, 0, 0, 1, 46, 0, 0, 46, 0, 1, 46, 0]\n",
"\tWeights: [ 0.46 0.2 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.32 0. 0. 0. ]\n",
"\tIdentifiers: (1, 71) (1, 79) (1, 143) (1, 280)\n",
"[INFO] [2016-08-16 07:59:26,280:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:59:26,282:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.739142 seconds\n",
"[INFO] [2016-08-16 07:59:26,286:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (271)!.\n",
"[INFO] [2016-08-16 07:59:26,288:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (271)!\n",
"[ERROR] [2016-08-16 07:59:26,302:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:26,378:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:26,493:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:26,549:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:26,606:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:28,901:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.356557 2: 0.372951 3: 0.360656 4: 0.360656 5: 0.372951 6: 0.364754 7: 0.364754 8: 0.372951 9: 0.364754 10: 0.368852 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.372951 15: 0.368852 16: 0.368852 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.377049 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.368852 32: 0.368852 33: 0.372951 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.368852 39: 0.372951 40: 0.372951 41: 0.368852 42: 0.372951 43: 0.372951 44: 0.368852 45: 0.368852 46: 0.372951 47: 0.368852 48: 0.372951 49: 0.372951\n",
"\tMembers: [1, 46, 0, 1, 46, 0, 1, 46, 0, 0, 46, 0, 0, 46, 1, 0, 46, 1, 1, 46, 0, 0, 46, 0, 0, 46, 1, 0, 46, 0, 0, 0, 46, 1, 0, 0, 46, 16, 46, 0, 0, 1, 46, 0, 0, 46, 0, 1, 46, 0]\n",
"\tWeights: [ 0.46 0.2 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.32 0. 0. 0. ]\n",
"\tIdentifiers: (1, 71) (1, 79) (1, 143) (1, 280)\n",
"[INFO] [2016-08-16 07:59:28,907:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:59:28,910:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.612866 seconds\n",
"[INFO] [2016-08-16 07:59:28,912:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:59:31,441:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 292. configuration. Duration: 5.716160; loss: 0.688525; status 1; additional run info: ;duration: 5.71616005897522;num_run:00292 \n",
"[INFO] [2016-08-16 07:59:32,052:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 236 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run28\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:59:32,939:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:33,011:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:33,122:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:33,174:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:33,229:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:35,380:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.356557 2: 0.372951 3: 0.360656 4: 0.360656 5: 0.372951 6: 0.364754 7: 0.364754 8: 0.372951 9: 0.364754 10: 0.368852 11: 0.372951 12: 0.368852 13: 0.368852 14: 0.372951 15: 0.368852 16: 0.368852 17: 0.372951 18: 0.372951 19: 0.372951 20: 0.372951 21: 0.372951 22: 0.377049 23: 0.372951 24: 0.372951 25: 0.372951 26: 0.372951 27: 0.372951 28: 0.372951 29: 0.372951 30: 0.372951 31: 0.368852 32: 0.368852 33: 0.372951 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.372951 38: 0.368852 39: 0.372951 40: 0.372951 41: 0.368852 42: 0.372951 43: 0.372951 44: 0.368852 45: 0.368852 46: 0.372951 47: 0.368852 48: 0.372951 49: 0.372951\n",
"\tMembers: [1, 46, 0, 1, 46, 0, 1, 46, 0, 0, 46, 0, 0, 46, 1, 0, 46, 1, 1, 46, 0, 0, 46, 0, 0, 46, 1, 0, 46, 0, 0, 0, 46, 1, 0, 0, 46, 16, 46, 0, 0, 1, 46, 0, 0, 46, 0, 1, 46, 0]\n",
"\tWeights: [ 0.46 0.2 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0.02 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0.32 0. 0. 0. ]\n",
"\tIdentifiers: (1, 71) (1, 79) (1, 143) (1, 280)\n",
"[INFO] [2016-08-16 07:59:35,387:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:59:35,388:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.454352 seconds\n",
"[INFO] [2016-08-16 07:59:35,390:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:59:47,352:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 15.2981 seconds to find next configurations\n",
"[INFO] [2016-08-16 07:59:47,358:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 293. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:47,360:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:47,939:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 293. configuration. Duration: 0.529970; loss: 0.647541; status 1; additional run info: ;duration: 0.5299701690673828;num_run:00293 \n",
"[INFO] [2016-08-16 07:59:47,947:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 294. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:47,948:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: liblinear_svc\n",
" classifier:liblinear_svc:C, Value: 1997.81172408\n",
" classifier:liblinear_svc:dual, Constant: False\n",
" classifier:liblinear_svc:fit_intercept, Constant: True\n",
" classifier:liblinear_svc:intercept_scaling, Constant: 1\n",
" classifier:liblinear_svc:loss, Value: squared_hinge\n",
" classifier:liblinear_svc:multi_class, Constant: ovr\n",
" classifier:liblinear_svc:penalty, Value: l2\n",
" classifier:liblinear_svc:tol, Value: 0.00277116924811\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.0215353123495\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/utils/class_weight.py:62: DeprecationWarning: The class_weight='auto' heuristic is deprecated in 0.17 in favor of a new heuristic class_weight='balanced'. 'auto' will be removed in 0.19\n",
" \" 0.19\", DeprecationWarning)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:48,029:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 294. configuration. Duration: 0.047231; loss: 0.684426; status 1; additional run info: ;duration: 0.047231435775756836;num_run:00294 \n",
"[INFO] [2016-08-16 07:59:48,036:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 295. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:48,037:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:48,612:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 295. configuration. Duration: 0.525492; loss: 0.647541; status 1; additional run info: ;duration: 0.5254919528961182;num_run:00295 \n",
"[INFO] [2016-08-16 07:59:48,618:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 296. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:48,619:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.0237053925316\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 7\n",
" classifier:gradient_boosting:max_features, Value: 2.2088132815\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 17\n",
" classifier:gradient_boosting:min_samples_split, Value: 6\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 120\n",
" classifier:gradient_boosting:subsample, Value: 0.584900307831\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: cube\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run28\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:59:49,460:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:49,519:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:49,611:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:49,655:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:49,703:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:49,958:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 296. configuration. Duration: 1.286519; loss: 0.733607; status 1; additional run info: ;duration: 1.2865190505981445;num_run:00296 \n",
"[INFO] [2016-08-16 07:59:49,964:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 297. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:49,965:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:50,550:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 297. configuration. Duration: 0.532373; loss: 0.647541; status 1; additional run info: ;duration: 0.5323727130889893;num_run:00297 \n",
"[INFO] [2016-08-16 07:59:50,557:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 298. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:50,558:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: random_forest\n",
" classifier:random_forest:bootstrap, Value: True\n",
" classifier:random_forest:criterion, Value: entropy\n",
" classifier:random_forest:max_depth, Constant: None\n",
" classifier:random_forest:max_features, Value: 2.54828115647\n",
" classifier:random_forest:max_leaf_nodes, Constant: None\n",
" classifier:random_forest:min_samples_leaf, Value: 10\n",
" classifier:random_forest:min_samples_split, Value: 12\n",
" classifier:random_forest:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:random_forest:n_estimators, Constant: 100\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: random_trees_embedding\n",
" preprocessor:random_trees_embedding:max_depth, Value: 3\n",
" preprocessor:random_trees_embedding:max_leaf_nodes, Constant: None\n",
" preprocessor:random_trees_embedding:min_samples_leaf, Value: 12\n",
" preprocessor:random_trees_embedding:min_samples_split, Value: 9\n",
" preprocessor:random_trees_embedding:min_weight_fraction_leaf, Constant: 1.0\n",
" preprocessor:random_trees_embedding:n_estimators, Value: 42\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:59:50,935:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 298. configuration. Duration: 0.336071; loss: 0.692623; status 1; additional run info: ;duration: 0.3360710144042969;num_run:00298 \n",
"[INFO] [2016-08-16 07:59:50,941:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 299. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:50,943:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.000488045939212\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:51,531:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 299. configuration. Duration: 0.537658; loss: 0.647541; status 1; additional run info: ;duration: 0.5376579761505127;num_run:00299 \n",
"[INFO] [2016-08-16 07:59:51,538:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 300. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:51,540:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: adaboost\n",
" classifier:adaboost:algorithm, Value: SAMME\n",
" classifier:adaboost:learning_rate, Value: 0.102109207021\n",
" classifier:adaboost:max_depth, Value: 1\n",
" classifier:adaboost:n_estimators, Value: 119\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: True\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:59:51,552:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.356557 2: 0.372951 3: 0.364754 4: 0.368852 5: 0.364754 6: 0.360656 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.368852 11: 0.364754 12: 0.368852 13: 0.364754 14: 0.368852 15: 0.368852 16: 0.368852 17: 0.364754 18: 0.364754 19: 0.368852 20: 0.368852 21: 0.368852 22: 0.364754 23: 0.364754 24: 0.364754 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.364754 30: 0.368852 31: 0.368852 32: 0.368852 33: 0.368852 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.368852 40: 0.368852 41: 0.364754 42: 0.364754 43: 0.368852 44: 0.372951 45: 0.372951 46: 0.372951 47: 0.372951 48: 0.372951 49: 0.372951\n",
"\tMembers: [0, 44, 0, 14, 0, 0, 0, 0, 12, 44, 0, 0, 44, 0, 14, 0, 44, 0, 0, 12, 0, 0, 0, 0, 0, 44, 0, 0, 0, 0, 44, 0, 0, 0, 0, 44, 0, 0, 0, 0, 0, 0, 44, 12, 12, 0, 0, 44, 0, 0]\n",
"\tWeights: [ 0.7 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.08 0. 0.04 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0.18 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 79) (1, 121) (1, 143) (1, 280)\n",
"[INFO] [2016-08-16 07:59:51,558:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:59:51,559:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.104016 seconds\n",
"[INFO] [2016-08-16 07:59:51,562:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (281)!.\n",
"[INFO] [2016-08-16 07:59:51,564:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (281)!\n",
"[ERROR] [2016-08-16 07:59:51,576:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:51,638:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:51,733:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:51,779:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:51,827:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:53,277:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 300. configuration. Duration: 1.695969; loss: 0.717213; status 1; additional run info: ;duration: 1.6959686279296875;num_run:00300 \n",
"[INFO] [2016-08-16 07:59:53,284:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 301. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:53,286:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 07:59:53,773:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.356557 2: 0.372951 3: 0.364754 4: 0.368852 5: 0.364754 6: 0.360656 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.360656 11: 0.360656 12: 0.360656 13: 0.360656 14: 0.364754 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.368852 20: 0.364754 21: 0.364754 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.368852 33: 0.368852 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.372951 40: 0.372951 41: 0.368852 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [0, 42, 0, 12, 0, 0, 0, 0, 12, 12, 0, 22, 0, 0, 42, 0, 0, 0, 0, 12, 12, 12, 42, 12, 12, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 42, 0, 0, 0, 0, 0, 0, 12]\n",
"\tWeights: [ 0.66 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.22 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.1 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 79) (1, 143) (1, 201) (1, 280)\n",
"[INFO] [2016-08-16 07:59:53,779:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:59:53,780:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.209031 seconds\n",
"[INFO] [2016-08-16 07:59:53,783:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (285)!.\n",
"[INFO] [2016-08-16 07:59:53,785:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (285)!\n",
"[ERROR] [2016-08-16 07:59:53,798:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:53,863:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[INFO] [2016-08-16 07:59:53,911:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 301. configuration. Duration: 0.573492; loss: 0.647541; status 1; additional run info: ;duration: 0.5734922885894775;num_run:00301 \n",
"[INFO] [2016-08-16 07:59:53,918:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 302. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:53,919:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: decision_tree\n",
" classifier:decision_tree:criterion, Value: entropy\n",
" classifier:decision_tree:max_depth, Value: 1.37534990148\n",
" classifier:decision_tree:max_features, Constant: 1.0\n",
" classifier:decision_tree:max_leaf_nodes, Constant: None\n",
" classifier:decision_tree:min_samples_leaf, Value: 14\n",
" classifier:decision_tree:min_samples_split, Value: 8\n",
" classifier:decision_tree:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:decision_tree:splitter, Constant: best\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: fast_ica\n",
" preprocessor:fast_ica:algorithm, Value: parallel\n",
" preprocessor:fast_ica:fun, Value: exp\n",
" preprocessor:fast_ica:whiten, Value: False\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[ERROR] [2016-08-16 07:59:53,963:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[INFO] [2016-08-16 07:59:53,996:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 302. configuration. Duration: 0.034682; loss: 0.790984; status 1; additional run info: ;duration: 0.034682273864746094;num_run:00302 \n",
"[INFO] [2016-08-16 07:59:54,003:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 303. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:54,004:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[ERROR] [2016-08-16 07:59:54,017:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:54,069:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:54,642:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 303. configuration. Duration: 0.583653; loss: 0.647541; status 1; additional run info: ;duration: 0.5836532115936279;num_run:00303 \n",
"[INFO] [2016-08-16 07:59:54,650:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 304. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:54,651:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 3.56600128576\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 3\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: True\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 07:59:54,781:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 304. configuration. Duration: 0.086689; loss: 0.897541; status 1; additional run info: ;duration: 0.08668947219848633;num_run:00304 \n",
"[INFO] [2016-08-16 07:59:54,788:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 305. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:54,790:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.0631532365365\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 4\n",
" classifier:gradient_boosting:max_features, Value: 0.993410685749\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 12\n",
" classifier:gradient_boosting:min_samples_split, Value: 12\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 161\n",
" classifier:gradient_boosting:subsample, Value: 0.833832719862\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.00291788854971\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: normalize\n",
"\n",
"[INFO] [2016-08-16 07:59:56,135:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.356557 2: 0.372951 3: 0.364754 4: 0.368852 5: 0.364754 6: 0.360656 7: 0.368852 8: 0.368852 9: 0.368852 10: 0.360656 11: 0.360656 12: 0.360656 13: 0.360656 14: 0.364754 15: 0.368852 16: 0.368852 17: 0.368852 18: 0.368852 19: 0.368852 20: 0.364754 21: 0.364754 22: 0.372951 23: 0.372951 24: 0.372951 25: 0.368852 26: 0.368852 27: 0.368852 28: 0.368852 29: 0.368852 30: 0.368852 31: 0.368852 32: 0.368852 33: 0.368852 34: 0.368852 35: 0.368852 36: 0.368852 37: 0.368852 38: 0.368852 39: 0.372951 40: 0.372951 41: 0.368852 42: 0.368852 43: 0.368852 44: 0.368852 45: 0.368852 46: 0.368852 47: 0.368852 48: 0.368852 49: 0.372951\n",
"\tMembers: [0, 42, 0, 12, 0, 0, 0, 0, 12, 12, 0, 22, 0, 0, 42, 0, 0, 0, 0, 12, 12, 12, 42, 12, 12, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 42, 0, 0, 0, 0, 0, 0, 12]\n",
"\tWeights: [ 0.66 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.22 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0.1 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 79) (1, 143) (1, 201) (1, 280)\n",
"[INFO] [2016-08-16 07:59:56,143:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.372951\n",
"[INFO] [2016-08-16 07:59:56,146:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.353026 seconds\n",
"[INFO] [2016-08-16 07:59:56,149:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 07:59:56,436:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 305. configuration. Duration: 1.581735; loss: 0.754098; status 1; additional run info: ;duration: 1.5817346572875977;num_run:00305 \n",
"[INFO] [2016-08-16 07:59:56,445:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 306. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:56,446:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.0951052247959\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 4\n",
" classifier:gradient_boosting:max_features, Value: 4.22846539056\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 5\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 115\n",
" classifier:gradient_boosting:subsample, Value: 0.623087650485\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00404065871665\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: polynomial\n",
" preprocessor:polynomial:degree, Value: 2\n",
" preprocessor:polynomial:include_bias, Value: False\n",
" preprocessor:polynomial:interaction_only, Value: False\n",
" rescaling:__choice__, Value: standardize\n",
"\n",
"[INFO] [2016-08-16 07:59:57,940:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 306. configuration. Duration: 1.435555; loss: 0.713115; status 1; additional run info: ;duration: 1.4355554580688477;num_run:00306 \n",
"[INFO] [2016-08-16 07:59:57,947:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 307. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:57,950:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: xgradient_boosting\n",
" classifier:xgradient_boosting:base_score, Constant: 0.5\n",
" classifier:xgradient_boosting:colsample_bylevel, Constant: 1\n",
" classifier:xgradient_boosting:colsample_bytree, Constant: 1\n",
" classifier:xgradient_boosting:gamma, Constant: 0\n",
" classifier:xgradient_boosting:learning_rate, Value: 0.0802082120484\n",
" classifier:xgradient_boosting:max_delta_step, Constant: 0\n",
" classifier:xgradient_boosting:max_depth, Value: 7\n",
" classifier:xgradient_boosting:min_child_weight, Value: 6\n",
" classifier:xgradient_boosting:n_estimators, Value: 451\n",
" classifier:xgradient_boosting:reg_alpha, Constant: 0\n",
" classifier:xgradient_boosting:reg_lambda, Constant: 1\n",
" classifier:xgradient_boosting:scale_pos_weight, Constant: 1\n",
" classifier:xgradient_boosting:subsample, Value: 0.590333685403\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run30\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 07:59:58,168:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 07:59:58,236:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 07:59:58,339:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 07:59:58,390:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 07:59:58,442:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 07:59:59,024:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 307. configuration. Duration: 1.034702; loss: 0.688525; status 1; additional run info: ;duration: 1.0347023010253906;num_run:00307 \n",
"[INFO] [2016-08-16 07:59:59,033:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 308. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:59,036:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: lda\n",
" classifier:lda:n_components, Value: 20\n",
" classifier:lda:shrinkage, Value: None\n",
" classifier:lda:tol, Value: 8.50556476578e-05\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.450958682306\n",
" preprocessor:kitchen_sinks:n_components, Value: 149\n",
" rescaling:__choice__, Value: min/max\n",
"\n",
"[INFO] [2016-08-16 07:59:59,214:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 308. configuration. Duration: 0.106694; loss: 0.754098; status 1; additional run info: ;duration: 0.10669422149658203;num_run:00308 \n",
"[INFO] [2016-08-16 07:59:59,222:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 309. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 07:59:59,224:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.0123406250819\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.27680648272\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 7\n",
" classifier:gradient_boosting:min_samples_split, Value: 17\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 212\n",
" classifier:gradient_boosting:subsample, Value: 0.270972072129\n",
" imputation:strategy, Value: mean\n",
" one_hot_encoding:minimum_fraction, Value: 0.000500473185773\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: feature_agglomeration\n",
" preprocessor:feature_agglomeration:affinity, Value: manhattan\n",
" preprocessor:feature_agglomeration:linkage, Value: complete\n",
" preprocessor:feature_agglomeration:n_clusters, Value: 131\n",
" preprocessor:feature_agglomeration:pooling_func, Value: max\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 08:00:00,548:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.352459 2: 0.352459 3: 0.352459 4: 0.352459 5: 0.352459 6: 0.352459 7: 0.352459 8: 0.352459 9: 0.352459 10: 0.352459 11: 0.352459 12: 0.352459 13: 0.352459 14: 0.352459 15: 0.352459 16: 0.352459 17: 0.352459 18: 0.356557 19: 0.356557 20: 0.356557 21: 0.356557 22: 0.360656 23: 0.360656 24: 0.360656 25: 0.360656 26: 0.360656 27: 0.356557 28: 0.356557 29: 0.356557 30: 0.356557 31: 0.356557 32: 0.356557 33: 0.356557 34: 0.356557 35: 0.356557 36: 0.356557 37: 0.356557 38: 0.356557 39: 0.356557 40: 0.356557 41: 0.356557 42: 0.356557 43: 0.356557 44: 0.360656 45: 0.360656 46: 0.360656 47: 0.360656 48: 0.360656 49: 0.360656\n",
"\tMembers: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.04 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 79) (1, 143)\n",
"[INFO] [2016-08-16 08:00:00,556:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.360656\n",
"[INFO] [2016-08-16 08:00:00,559:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.395845 seconds\n",
"[INFO] [2016-08-16 08:00:00,563:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many validation set predictions (0)as ensemble predictions (292)!.\n",
"[INFO] [2016-08-16 08:00:00,564:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Could not find as many test set predictions (0) as ensemble predictions (292)!\n",
"[ERROR] [2016-08-16 08:00:00,577:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 08:00:00,647:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 08:00:00,751:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 08:00:00,803:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 08:00:00,856:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 08:00:01,267:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 309. configuration. Duration: 1.971730; loss: 0.668033; status 1; additional run info: ;duration: 1.9717304706573486;num_run:00309 \n",
"[INFO] [2016-08-16 08:00:01,275:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 310. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 08:00:01,277:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: qda\n",
" classifier:qda:reg_param, Value: 2.91906564205\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00038327831332\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: kitchen_sinks\n",
" preprocessor:kitchen_sinks:gamma, Value: 0.661046144339\n",
" preprocessor:kitchen_sinks:n_components, Value: 9993\n",
" rescaling:__choice__, Value: standardize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/opt/conda/lib/python3.5/site-packages/sklearn/discriminant_analysis.py:688: UserWarning: Variables are collinear\n",
" warnings.warn(\"Variables are collinear\")\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[INFO] [2016-08-16 08:00:04,298:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.352459 2: 0.352459 3: 0.352459 4: 0.352459 5: 0.352459 6: 0.352459 7: 0.352459 8: 0.352459 9: 0.352459 10: 0.352459 11: 0.352459 12: 0.352459 13: 0.352459 14: 0.352459 15: 0.352459 16: 0.352459 17: 0.352459 18: 0.356557 19: 0.356557 20: 0.356557 21: 0.356557 22: 0.360656 23: 0.360656 24: 0.360656 25: 0.360656 26: 0.360656 27: 0.356557 28: 0.356557 29: 0.356557 30: 0.356557 31: 0.356557 32: 0.356557 33: 0.356557 34: 0.356557 35: 0.356557 36: 0.356557 37: 0.356557 38: 0.356557 39: 0.356557 40: 0.356557 41: 0.356557 42: 0.356557 43: 0.356557 44: 0.360656 45: 0.360656 46: 0.360656 47: 0.360656 48: 0.360656 49: 0.360656\n",
"\tMembers: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.04 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 79) (1, 143)\n",
"[INFO] [2016-08-16 08:00:04,319:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.360656\n",
"[INFO] [2016-08-16 08:00:04,324:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 3.751712 seconds\n",
"[INFO] [2016-08-16 08:00:04,329:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 08:00:05,134:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 310. configuration. Duration: 3.684906; loss: 0.909836; status 1; additional run info: ;duration: 3.684906005859375;num_run:00310 \n",
"[INFO] [2016-08-16 08:00:05,889:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Using 249 training points for SMAC.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run31\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 08:00:06,355:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 08:00:06,441:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 08:00:06,571:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00176.npy has score: -0.159836065574\n",
"[ERROR] [2016-08-16 08:00:06,634:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00228.npy has score: -0.016393442623\n",
"[ERROR] [2016-08-16 08:00:06,700:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00282.npy has score: -0.0327868852459\n",
"[INFO] [2016-08-16 08:00:09,326:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble Selection:\n",
"\tTrajectory: 0: 0.352459 1: 0.352459 2: 0.352459 3: 0.352459 4: 0.352459 5: 0.352459 6: 0.352459 7: 0.352459 8: 0.352459 9: 0.352459 10: 0.352459 11: 0.352459 12: 0.352459 13: 0.352459 14: 0.352459 15: 0.352459 16: 0.352459 17: 0.352459 18: 0.356557 19: 0.356557 20: 0.356557 21: 0.356557 22: 0.360656 23: 0.360656 24: 0.360656 25: 0.360656 26: 0.360656 27: 0.356557 28: 0.356557 29: 0.356557 30: 0.356557 31: 0.356557 32: 0.356557 33: 0.356557 34: 0.356557 35: 0.356557 36: 0.356557 37: 0.356557 38: 0.356557 39: 0.356557 40: 0.356557 41: 0.356557 42: 0.356557 43: 0.356557 44: 0.360656 45: 0.360656 46: 0.360656 47: 0.360656 48: 0.360656 49: 0.360656\n",
"\tMembers: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0]\n",
"\tWeights: [ 0.96 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0.04 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n",
"\tIdentifiers: (1, 79) (1, 143)\n",
"[INFO] [2016-08-16 08:00:09,333:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Training performance: 0.360656\n",
"[INFO] [2016-08-16 08:00:09,335:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Building the ensemble took 2.986415 seconds\n",
"[INFO] [2016-08-16 08:00:09,337:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Ensemble output did not change.\n",
"[INFO] [2016-08-16 08:00:18,565:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Used 12.6745 seconds to find next configurations\n",
"[INFO] [2016-08-16 08:00:18,571:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 311. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 08:00:18,573:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: none\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.1\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 3\n",
" classifier:gradient_boosting:max_features, Value: 1.0\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 1\n",
" classifier:gradient_boosting:min_samples_split, Value: 2\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 100\n",
" classifier:gradient_boosting:subsample, Value: 1.0\n",
" imputation:strategy, Value: median\n",
" one_hot_encoding:minimum_fraction, Value: 0.00382888292142\n",
" one_hot_encoding:use_minimum_fraction, Value: True\n",
" preprocessor:__choice__, Value: no_preprocessing\n",
" rescaling:__choice__, Value: none\n",
"\n",
"[INFO] [2016-08-16 08:00:19,155:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Finished evaluating 311. configuration. Duration: 0.532541; loss: 0.647541; status 1; additional run info: ;duration: 0.5325412750244141;num_run:00311 \n",
"[INFO] [2016-08-16 08:00:19,163:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Starting to evaluate 312. configuration (from SMAC) with time limit 360s.\n",
"[INFO] [2016-08-16 08:00:19,165:AutoMLSMBO(1)::7a3a10b65f5366b76f046add6da72a1c] Configuration:\n",
" balancing:strategy, Value: weighting\n",
" classifier:__choice__, Value: gradient_boosting\n",
" classifier:gradient_boosting:learning_rate, Value: 0.0110206252034\n",
" classifier:gradient_boosting:loss, Constant: deviance\n",
" classifier:gradient_boosting:max_depth, Value: 8\n",
" classifier:gradient_boosting:max_features, Value: 3.01751308323\n",
" classifier:gradient_boosting:max_leaf_nodes, Constant: None\n",
" classifier:gradient_boosting:min_samples_leaf, Value: 12\n",
" classifier:gradient_boosting:min_samples_split, Value: 20\n",
" classifier:gradient_boosting:min_weight_fraction_leaf, Constant: 0.0\n",
" classifier:gradient_boosting:n_estimators, Value: 391\n",
" classifier:gradient_boosting:subsample, Value: 0.960310296709\n",
" imputation:strategy, Value: most_frequent\n",
" one_hot_encoding:use_minimum_fraction, Value: False\n",
" preprocessor:__choice__, Value: select_percentile_classification\n",
" preprocessor:select_percentile_classification:percentile, Value: 24.4276507313\n",
" preprocessor:select_percentile_classification:score_func, Value: chi2\n",
" rescaling:__choice__, Value: normalize\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are already timing task: index_run31\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[ERROR] [2016-08-16 08:00:19,392:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00005.npy has score: -0.0983606557377\n",
"[ERROR] [2016-08-16 08:00:19,452:EnsembleBuilder(1):7a3a10b65f5366b76f046add6da72a1c] Model only predicts at random: /tmp/autosklearn_tmp_676_7642/.auto-sklearn/predictions_ensemble/predictions_ensemble_1_00074.npy has score: -0.0737704918033\n",
"[ERROR] [2016-08-16 08:00:19,542:EnsembleBuilder(1):7a
|
ValueError
|
def save_targets_ensemble(self, targets):
    """Persist the ensemble ground-truth targets to disk and return the path.

    The targets file is shared between processes (SMBO driver and the
    ensemble builder), so the write is guarded by a lock file and performed
    atomically via a temporary file plus ``os.rename``.

    Args:
        targets (np.ndarray): target values used for ensemble selection.

    Returns:
        str: path of the on-disk targets file.

    Raises:
        ValueError: if ``targets`` is not a ``np.ndarray``.
    """
    self._make_internals_directory()
    if not isinstance(targets, np.ndarray):
        raise ValueError(
            "Targets must be of type np.ndarray, but is %s" % type(targets)
        )
    filepath = self._get_targets_ensemble_filename()
    # Try to open the file without locking it, this will reduce the
    # number of times where we erroneously keep a lock on the ensemble
    # targets file although the process already was killed
    try:
        existing_targets = np.load(filepath)
        # Nothing to do if the stored targets already cover (or equal)
        # what we were asked to save.
        if existing_targets.shape[0] > targets.shape[0] or (
            existing_targets.shape == targets.shape
            and np.allclose(existing_targets, targets)
        ):
            return filepath
    except Exception:
        # Best-effort fast path only: any failure (missing/corrupt file)
        # falls through to the locked slow path below.
        pass
    lock_path = filepath + ".lock"
    with lockfile.LockFile(lock_path):
        # Re-check under the lock: another process may have written the
        # file between the unlocked read above and lock acquisition.
        if os.path.exists(filepath):
            existing_targets = np.load(filepath)
            if existing_targets.shape[0] > targets.shape[0] or (
                existing_targets.shape == targets.shape
                and np.allclose(existing_targets, targets)
            ):
                return filepath
        # Write to a temporary file in the same directory, then rename:
        # readers never observe a partially-written targets file.
        with tempfile.NamedTemporaryFile(
            "wb", dir=os.path.dirname(filepath), delete=False
        ) as fh:
            np.save(fh, targets.astype(np.float32))
            tempname = fh.name
        os.rename(tempname, filepath)
    return filepath
|
def save_targets_ensemble(self, targets):
    """Persist the ensemble ground-truth targets to disk and return the path.

    Args:
        targets (np.ndarray): target values used for ensemble selection.

    Returns:
        str: path of the on-disk targets file.

    Raises:
        ValueError: if ``targets`` is not a ``np.ndarray``.
    """
    self._make_internals_directory()
    if not isinstance(targets, np.ndarray):
        raise ValueError(
            "Targets must be of type np.ndarray, but is %s" % type(targets)
        )
    filepath = self._get_targets_ensemble_filename()
    # BUGFIX: previously the lock was always acquired first. A stale lock
    # file left behind by a killed process then made every later call hang
    # in lockfile.LockFile.acquire() (see automl/auto-sklearn issue #160).
    # Try a lock-free read first and return early when the stored targets
    # already match or supersede the requested ones; only take the lock
    # when an actual write may be needed.
    try:
        existing_targets = np.load(filepath)
        if existing_targets.shape[0] > targets.shape[0] or (
            existing_targets.shape == targets.shape
            and np.allclose(existing_targets, targets)
        ):
            return filepath
    except Exception:
        # Best-effort fast path: on any failure (missing or unreadable
        # file) fall through to the locked slow path below.
        pass
    lock_path = filepath + ".lock"
    with lockfile.LockFile(lock_path):
        # Re-check under the lock: another process may have written the
        # file between the unlocked read above and lock acquisition.
        if os.path.exists(filepath):
            existing_targets = np.load(filepath)
            if existing_targets.shape[0] > targets.shape[0] or (
                existing_targets.shape == targets.shape
                and np.allclose(existing_targets, targets)
            ):
                return filepath
        # Write to a temporary file in the same directory, then rename:
        # readers never observe a partially-written targets file.
        with tempfile.NamedTemporaryFile(
            "wb", dir=os.path.dirname(filepath), delete=False
        ) as fh:
            np.save(fh, targets.astype(np.float32))
            tempname = fh.name
        os.rename(tempname, filepath)
    return filepath
|
https://github.com/automl/auto-sklearn/issues/160
|
File "/x/Redly/anaconda3/lib/python3.5/site-packages/lockfile/linklockfile.py", line 31, in acquire
os.link(self.unique_name, self.lock_file)
FileExistsError: [Errno 17] File exists: '/x/Redly/truffles/nfs_share/atsklrn_tmp/.auto-sklearn/mgm-19-6811-41172700.20609-2442176699744317723' -> '/x/Redly/truffles/nfs_share/atsklrn_tmp/.auto-sklearn/true_targets_ensemble.npy.lock.lock'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "nfs_share/Redly-SQL-Example2.py", line 449, in <module>
fit_classifiers(dataframe, pool_size, atsklrn_tempdir, slave_node, predict_only)
File "nfs_share/Redly-SQL-Example2.py", line 271, in fit_classifiers
,ensemble_nbest=15)
File "/x/Redly/anaconda3/lib/python3.5/site-packages/autosklearn/automl.py", line 480, in fit_ensemble
self._proc_ensemble.main()
File "/x/Redly/anaconda3/lib/python3.5/site-packages/autosklearn/ensemble_builder.py", line 94, in main
targets_ensemble = backend.load_targets_ensemble()
File "/x/Redly/anaconda3/lib/python3.5/site-packages/autosklearn/util/backend.py", line 117, in load_targets_ensemble
with lockfile.LockFile(lock_path):
File "/x/Redly/anaconda3/lib/python3.5/site-packages/lockfile/__init__.py", line 197, in __enter__
self.acquire()
File "/x/Redly/anaconda3/lib/python3.5/site-packages/lockfile/linklockfile.py", line 50, in acquire
time.sleep(timeout is not None and timeout / 10 or 0.1)
KeyboardInterrupt
Traceback (most recent call last):
File "/x/Redly/anaconda3/lib/python3.5/site-packages/lockfile/linklockfile.py", line 31, in acquire
os.link(self.unique_name, self.lock_file)
FileExistsError: [Errno 17] File exists: '/x/Redly/truffles/nfs_share/atsklrn_tmp/.auto-sklearn/mgm-19-6811-41172700.20609-2442176699744317723' -> '/x/Redly/truffles/nfs_share/atsklrn_tmp/.auto-sklearn/true_targets_ensemble.npy.lock.lock'
|
FileExistsError
|
def estimate_ate(
    self,
    X,
    treatment,
    y,
    p=None,
    bootstrap_ci=False,
    n_bootstraps=1000,
    bootstrap_size=10000,
):
    """Estimate the Average Treatment Effect (ATE).

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        bootstrap_ci (bool): whether run bootstrap for confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap

    Returns:
        The mean and confidence interval (LB, UB) of the ATE estimate.
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    # Per-sample treatment effect estimates, shape [n_samples, n_treatments].
    te = self.fit_predict(X, treatment, y, p)

    if p is None:
        # fit_predict generated propensity scores internally; reuse them.
        p = self.propensity
    else:
        check_p_conditions(p, self.t_groups)
    # Normalize p to a {treatment_name: np.ndarray} dict. Accepting pd.Series
    # here (not only np.ndarray) matters: a raw Series would otherwise pass
    # through un-wrapped and break downstream group indexing.
    if isinstance(p, (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    ate = np.zeros(self.t_groups.shape[0])
    ate_lb = np.zeros(self.t_groups.shape[0])
    ate_ub = np.zeros(self.t_groups.shape[0])

    for i, group in enumerate(self.t_groups):
        # Indicator of membership in the current treatment group.
        w = (treatment == group).astype(int)
        prob_treatment = float(sum(w)) / X.shape[0]
        _ate = te[:, i].mean()

        # Analytic standard error combining outcome variances in the treated
        # and control arms with the variance of the estimated effects.
        se = (
            np.sqrt(
                (self.vars_t[group] / prob_treatment)
                + (self.vars_c[group] / (1 - prob_treatment))
                + te[:, i].var()
            )
            / X.shape[0]
        )

        _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
        _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)

        ate[i] = _ate
        ate_lb[i] = _ate_lb
        ate_ub[i] = _ate_ub

    if not bootstrap_ci:
        return ate, ate_lb, ate_ub
    else:
        # Save fitted state: self.bootstrap refits on resampled data and
        # overwrites these members, so they must be restored afterwards.
        t_groups_global = self.t_groups
        _classes_global = self._classes
        model_mu_global = deepcopy(self.model_mu)
        models_tau_global = deepcopy(self.models_tau)

        logger.info("Bootstrap Confidence Intervals for ATE")
        ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))

        for n in tqdm(range(n_bootstraps)):
            cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            ate_bootstraps[:, n] = cate_b.mean()

        # Percentile bootstrap CI at the configured alpha level.
        ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
        ate_upper = np.percentile(
            ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
        )

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.model_mu = deepcopy(model_mu_global)
        self.models_tau = deepcopy(models_tau_global)
        return ate, ate_lower, ate_upper
|
def estimate_ate(
    self,
    X,
    treatment,
    y,
    p=None,
    bootstrap_ci=False,
    n_bootstraps=1000,
    bootstrap_size=10000,
):
    """Estimate the Average Treatment Effect (ATE).

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        bootstrap_ci (bool): whether run bootstrap for confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap

    Returns:
        The mean and confidence interval (LB, UB) of the ATE estimate.
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    # Per-sample treatment effect estimates, shape [n_samples, n_treatments].
    te = self.fit_predict(X, treatment, y, p)

    if p is None:
        # fit_predict generated propensity scores internally; reuse them.
        p = self.propensity
    else:
        check_p_conditions(p, self.t_groups)
    # BUGFIX: also accept pd.Series here. The docstring advertises a Series
    # as valid input, but the previous np.ndarray-only check let a Series
    # slip through un-wrapped, later causing
    # "IndexError: invalid index to scalar variable" when indexing p[group].
    if isinstance(p, (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    ate = np.zeros(self.t_groups.shape[0])
    ate_lb = np.zeros(self.t_groups.shape[0])
    ate_ub = np.zeros(self.t_groups.shape[0])

    for i, group in enumerate(self.t_groups):
        # Indicator of membership in the current treatment group.
        w = (treatment == group).astype(int)
        prob_treatment = float(sum(w)) / X.shape[0]
        _ate = te[:, i].mean()

        # Analytic standard error combining outcome variances in the treated
        # and control arms with the variance of the estimated effects.
        se = (
            np.sqrt(
                (self.vars_t[group] / prob_treatment)
                + (self.vars_c[group] / (1 - prob_treatment))
                + te[:, i].var()
            )
            / X.shape[0]
        )

        _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
        _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)

        ate[i] = _ate
        ate_lb[i] = _ate_lb
        ate_ub[i] = _ate_ub

    if not bootstrap_ci:
        return ate, ate_lb, ate_ub
    else:
        # Save fitted state: self.bootstrap refits on resampled data and
        # overwrites these members, so they must be restored afterwards.
        t_groups_global = self.t_groups
        _classes_global = self._classes
        model_mu_global = deepcopy(self.model_mu)
        models_tau_global = deepcopy(self.models_tau)

        logger.info("Bootstrap Confidence Intervals for ATE")
        ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))

        for n in tqdm(range(n_bootstraps)):
            cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            ate_bootstraps[:, n] = cate_b.mean()

        # Percentile bootstrap CI at the configured alpha level.
        ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
        ate_upper = np.percentile(
            ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1
        )

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.model_mu = deepcopy(model_mu_global)
        self.models_tau = deepcopy(models_tau_global)
        return ate, ate_lower, ate_upper
https://github.com/uber/causalml/issues/241
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-59-76ffa2ace99a> in <module>
1 learner_x = BaseXRegressor(learner=XGBRegressor(random_state=42))
----> 2 te, lb, ub = learner_x.estimate_ate(X=X, p=df['p'], treatment=treatment, y=y)
3 print('Average Treatment Effect (BaseXRegressor using XGBoost): {:.2f} ({:.2f}, {:.2f})'.format(te[0], lb[0], ub[0]))
/dsw/snapshots/720ce1e8-1e96-45bf-a192-8bef9122fb73/python3/lib/python3.6/site-packages/causalml/inference/meta/xlearner.py in estimate_ate(self, X, treatment, y, p, bootstrap_ci, n_bootstraps, bootstrap_size)
326 dhat_c = dhat_cs[group][mask]
327 dhat_t = dhat_ts[group][mask]
--> 328 p_filt = p[group][mask]
329
330 # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
IndexError: invalid index to scalar variable.
|
IndexError
|
def fit_predict(
    self,
    X,
    treatment,
    y,
    p=None,
    return_ci=False,
    n_bootstraps=1000,
    bootstrap_size=10000,
    return_components=False,
    verbose=True,
):
    """Fit the treatment effect and outcome models of the R learner and predict treatment effects.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        return_ci (bool): whether to return confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap
        return_components (bool, optional): whether to return outcome for treatment and control seperately
        verbose (str): whether to output progress logs

    Returns:
        (numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment]
            If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
            UB [n_samples, n_treatment]
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    self.fit(X, treatment, y, p)

    if p is None:
        # fit generated propensity scores internally; reuse them.
        p = self.propensity
    check_p_conditions(p, self.t_groups)
    # Normalize p to a {treatment_name: np.ndarray} dict. Accepting pd.Series
    # here (not only np.ndarray) matters: a raw Series would otherwise pass
    # through un-wrapped and break downstream group indexing.
    if isinstance(p, (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    te = self.predict(
        X, treatment=treatment, y=y, p=p, return_components=return_components
    )

    if not return_ci:
        return te
    else:
        # Save fitted state: self.bootstrap refits on resampled data and
        # overwrites these members, so they must be restored afterwards.
        t_groups_global = self.t_groups
        _classes_global = self._classes
        models_mu_c_global = deepcopy(self.models_mu_c)
        models_mu_t_global = deepcopy(self.models_mu_t)
        models_tau_c_global = deepcopy(self.models_tau_c)
        models_tau_t_global = deepcopy(self.models_tau_t)

        te_bootstraps = np.zeros(
            shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)
        )

        logger.info("Bootstrap Confidence Intervals")
        for i in tqdm(range(n_bootstraps)):
            te_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            te_bootstraps[:, :, i] = te_b

        # Percentile bootstrap CI at the configured alpha level.
        te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2)
        te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2)

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.models_mu_c = deepcopy(models_mu_c_global)
        self.models_mu_t = deepcopy(models_mu_t_global)
        self.models_tau_c = deepcopy(models_tau_c_global)
        self.models_tau_t = deepcopy(models_tau_t_global)

        return (te, te_lower, te_upper)
|
def fit_predict(
    self,
    X,
    treatment,
    y,
    p=None,
    return_ci=False,
    n_bootstraps=1000,
    bootstrap_size=10000,
    return_components=False,
    verbose=True,
):
    """Fit the treatment effect and outcome models of the R learner and predict treatment effects.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        return_ci (bool): whether to return confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap
        return_components (bool, optional): whether to return outcome for treatment and control seperately
        verbose (str): whether to output progress logs

    Returns:
        (numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment]
            If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
            UB [n_samples, n_treatment]
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    self.fit(X, treatment, y, p)

    if p is None:
        # fit generated propensity scores internally; reuse them.
        p = self.propensity
    check_p_conditions(p, self.t_groups)
    # BUGFIX: also accept pd.Series here. The docstring advertises a Series
    # as valid input, but the previous np.ndarray-only check let a Series
    # slip through un-wrapped, later causing
    # "IndexError: invalid index to scalar variable" when indexing p[group].
    if isinstance(p, (np.ndarray, pd.Series)):
        treatment_name = self.t_groups[0]
        p = {treatment_name: convert_pd_to_np(p)}
    elif isinstance(p, dict):
        p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}

    te = self.predict(
        X, treatment=treatment, y=y, p=p, return_components=return_components
    )

    if not return_ci:
        return te
    else:
        # Save fitted state: self.bootstrap refits on resampled data and
        # overwrites these members, so they must be restored afterwards.
        t_groups_global = self.t_groups
        _classes_global = self._classes
        models_mu_c_global = deepcopy(self.models_mu_c)
        models_mu_t_global = deepcopy(self.models_mu_t)
        models_tau_c_global = deepcopy(self.models_tau_c)
        models_tau_t_global = deepcopy(self.models_tau_t)

        te_bootstraps = np.zeros(
            shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)
        )

        logger.info("Bootstrap Confidence Intervals")
        for i in tqdm(range(n_bootstraps)):
            te_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            te_bootstraps[:, :, i] = te_b

        # Percentile bootstrap CI at the configured alpha level.
        te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2)
        te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2)

        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.models_mu_c = deepcopy(models_mu_c_global)
        self.models_mu_t = deepcopy(models_mu_t_global)
        self.models_tau_c = deepcopy(models_tau_c_global)
        self.models_tau_t = deepcopy(models_tau_t_global)

        return (te, te_lower, te_upper)
https://github.com/uber/causalml/issues/241
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-59-76ffa2ace99a> in <module>
1 learner_x = BaseXRegressor(learner=XGBRegressor(random_state=42))
----> 2 te, lb, ub = learner_x.estimate_ate(X=X, p=df['p'], treatment=treatment, y=y)
3 print('Average Treatment Effect (BaseXRegressor using XGBoost): {:.2f} ({:.2f}, {:.2f})'.format(te[0], lb[0], ub[0]))
/dsw/snapshots/720ce1e8-1e96-45bf-a192-8bef9122fb73/python3/lib/python3.6/site-packages/causalml/inference/meta/xlearner.py in estimate_ate(self, X, treatment, y, p, bootstrap_ci, n_bootstraps, bootstrap_size)
326 dhat_c = dhat_cs[group][mask]
327 dhat_t = dhat_ts[group][mask]
--> 328 p_filt = p[group][mask]
329
330 # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.
IndexError: invalid index to scalar variable.
|
IndexError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.