repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.sell | def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs) | python | def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs) | [
"def",
"sell",
"(",
"self",
",",
"product_id",
",",
"order_type",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"place_order",
"(",
"product_id",
",",
"'sell'",
",",
"order_type",
",",
"*",
"*",
"kwargs",
")"
] | Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example. | [
"Place",
"a",
"sell",
"order",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L278-L296 | train | 216,100 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.place_market_order | def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params) | python | def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params) | [
"def",
"place_market_order",
"(",
"self",
",",
"product_id",
",",
"side",
",",
"size",
"=",
"None",
",",
"funds",
"=",
"None",
",",
"client_oid",
"=",
"None",
",",
"stp",
"=",
"None",
",",
"overdraft_enabled",
"=",
"None",
",",
"funding_amount",
"=",
"No... | Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example. | [
"Place",
"market",
"order",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L354-L393 | train | 216,101 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.cancel_all | def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params) | python | def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params) | [
"def",
"cancel_all",
"(",
"self",
",",
"product_id",
"=",
"None",
")",
":",
"if",
"product_id",
"is",
"not",
"None",
":",
"params",
"=",
"{",
"'product_id'",
":",
"product_id",
"}",
"else",
":",
"params",
"=",
"None",
"return",
"self",
".",
"_send_messag... | With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
] | [
"With",
"best",
"effort",
"cancel",
"all",
"open",
"orders",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L460-L482 | train | 216,102 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_orders | def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params) | python | def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params) | [
"def",
"get_orders",
"(",
"self",
",",
"product_id",
"=",
"None",
",",
"status",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"kwargs",
"if",
"product_id",
"is",
"not",
"None",
":",
"params",
"[",
"'product_id'",
"]",
"=",
"product_id... | List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
] | [
"List",
"your",
"current",
"open",
"orders",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L519-L582 | train | 216,103 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_fundings | def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params) | python | def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params) | [
"def",
"get_fundings",
"(",
"self",
",",
"status",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"}",
"if",
"status",
"is",
"not",
"None",
":",
"params",
"[",
"'status'",
"]",
"=",
"status",
"params",
".",
"update",
"(",
"kwar... | Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
] | [
"Every",
"order",
"placed",
"with",
"a",
"margin",
"profile",
"that",
"draws",
"funding",
"will",
"create",
"a",
"funding",
"record",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L642-L679 | train | 216,104 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.repay_funding | def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params)) | python | def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params)) | [
"def",
"repay_funding",
"(",
"self",
",",
"amount",
",",
"currency",
")",
":",
"params",
"=",
"{",
"'amount'",
":",
"amount",
",",
"'currency'",
":",
"currency",
"# example: USD",
"}",
"return",
"self",
".",
"_send_message",
"(",
"'post'",
",",
"'/funding/re... | Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro. | [
"Repay",
"funding",
".",
"Repays",
"the",
"older",
"funding",
"records",
"first",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L681-L697 | train | 216,105 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.margin_transfer | def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params)) | python | def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params)) | [
"def",
"margin_transfer",
"(",
"self",
",",
"margin_profile_id",
",",
"transfer_type",
",",
"currency",
",",
"amount",
")",
":",
"params",
"=",
"{",
"'margin_profile_id'",
":",
"margin_profile_id",
",",
"'type'",
":",
"transfer_type",
",",
"'currency'",
":",
"cu... | Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
} | [
"Transfer",
"funds",
"between",
"your",
"standard",
"profile",
"and",
"a",
"margin",
"profile",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L699-L734 | train | 216,106 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.close_position | def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params)) | python | def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params)) | [
"def",
"close_position",
"(",
"self",
",",
"repay_only",
")",
":",
"params",
"=",
"{",
"'repay_only'",
":",
"repay_only",
"}",
"return",
"self",
".",
"_send_message",
"(",
"'post'",
",",
"'/position/close'",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"pa... | Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented | [
"Close",
"position",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L745-L757 | train | 216,107 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.withdraw | def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params)) | python | def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params)) | [
"def",
"withdraw",
"(",
"self",
",",
"amount",
",",
"currency",
",",
"payment_method_id",
")",
":",
"params",
"=",
"{",
"'amount'",
":",
"amount",
",",
"'currency'",
":",
"currency",
",",
"'payment_method_id'",
":",
"payment_method_id",
"}",
"return",
"self",
... | Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
} | [
"Withdraw",
"funds",
"to",
"a",
"payment",
"method",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L816-L841 | train | 216,108 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.coinbase_withdraw | def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params)) | python | def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params)) | [
"def",
"coinbase_withdraw",
"(",
"self",
",",
"amount",
",",
"currency",
",",
"coinbase_account_id",
")",
":",
"params",
"=",
"{",
"'amount'",
":",
"amount",
",",
"'currency'",
":",
"currency",
",",
"'coinbase_account_id'",
":",
"coinbase_account_id",
"}",
"retu... | Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
} | [
"Withdraw",
"funds",
"to",
"a",
"coinbase",
"account",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L843-L871 | train | 216,109 |
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.crypto_withdraw | def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params)) | python | def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params)) | [
"def",
"crypto_withdraw",
"(",
"self",
",",
"amount",
",",
"currency",
",",
"crypto_address",
")",
":",
"params",
"=",
"{",
"'amount'",
":",
"amount",
",",
"'currency'",
":",
"currency",
",",
"'crypto_address'",
":",
"crypto_address",
"}",
"return",
"self",
... | Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
} | [
"Withdraw",
"funds",
"to",
"a",
"crypto",
"address",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L873-L894 | train | 216,110 |
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """Create report of historic information about your account.

    The report will be generated when resources are available. Report
    status can be queried via `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'
        start_date (str): Starting date for the report in ISO 8601
        end_date (str): Ending date for the report in ISO 8601
        product_id (Optional[str]): ID of the product to generate a fills
            report for. Required if account_type is 'fills'
        account_id (Optional[str]): ID of the account to generate an
            account report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details. Example::

            {
                "id": "0428b97b-bec1-429e-a94c-59232926778d",
                "type": "fills",
                "status": "pending",
                "created_at": "2015-01-06T10:34:47.000Z",
                "completed_at": undefined,
                "expires_at": "2015-01-13T10:35:47.000Z",
                "file_url": undefined,
                "params": {
                    "start_date": "2014-11-01T00:00:00.000Z",
                    "end_date": "2014-11-30T23:59:59.000Z"
                }
            }

    """
    payload = {
        'type': report_type,
        'start_date': start_date,
        'end_date': end_date,
        'format': report_format,
    }
    # Optional fields are only included when the caller supplied them.
    for field, value in (('product_id', product_id),
                         ('account_id', account_id),
                         ('email', email)):
        if value is not None:
            payload[field] = value
    return self._send_message('post', '/reports',
                              data=json.dumps(payload))
"def",
"create_report",
"(",
"self",
",",
"report_type",
",",
"start_date",
",",
"end_date",
",",
"product_id",
"=",
"None",
",",
"account_id",
"=",
"None",
",",
"report_format",
"=",
"'pdf'",
",",
"email",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'ty... | Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
} | [
"Create",
"report",
"of",
"historic",
"information",
"about",
"your",
"account",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L914-L961 | train | 216,111 |
def get_product_order_book(self, product_id, level=1):
    """Get a list of open orders for a product.

    The amount of detail shown can be customized with the `level`
    parameter:
    * 1: Only the best bid and ask
    * 2: Top 50 bids and asks (aggregated)
    * 3: Full order book (non aggregated)

    Level 1 and Level 2 are recommended for polling. For the most
    up-to-date data, consider using the websocket stream.

    **Caution**: Level 3 is only recommended for users wishing to
    maintain a full real-time order book using the websocket
    stream. Abuse of Level 3 via polling will cause your access to
    be limited or blocked.

    Args:
        product_id (str): Product
        level (Optional[int]): Order book level (1, 2, or 3).
            Default is 1.

    Returns:
        dict: Order book. Example for level 1::

            {
                "sequence": "3",
                "bids": [
                    [ price, size, num-orders ],
                ],
                "asks": [
                    [ price, size, num-orders ],
                ]
            }

    """
    endpoint = '/products/{}/book'.format(product_id)
    return self._send_message('get', endpoint, params={'level': level})
"def",
"get_product_order_book",
"(",
"self",
",",
"product_id",
",",
"level",
"=",
"1",
")",
":",
"params",
"=",
"{",
"'level'",
":",
"level",
"}",
"return",
"self",
".",
"_send_message",
"(",
"'get'",
",",
"'/products/{}/book'",
".",
"format",
"(",
"prod... | Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
} | [
"Get",
"a",
"list",
"of",
"open",
"orders",
"for",
"a",
"product",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L52-L90 | train | 216,112 |
def get_product_trades(self, product_id, before='', after='', limit=None, result=None):
    """List the latest trades for a product.

    This method returns a generator which may make multiple HTTP requests
    while iterating through it.

    Args:
        product_id (str): Product
        before (Optional[str]): start time in ISO 8601
        after (Optional[str]): end time in ISO 8601
        limit (Optional[int]): the desired number of trades (can be more
            than 100, automatically paginated)
        result (Optional[list]): retained for backward compatibility;
            ignored.

    Returns:
        list: Latest trades. Example::
            [{
                "time": "2014-11-07T22:19:28.578544Z",
                "trade_id": 74,
                "price": "10.00000000",
                "size": "0.01000000",
                "side": "buy"
            }, {
                "time": "2014-11-07T01:08:43.642366Z",
                "trade_id": 73,
                "price": "100.00000000",
                "size": "0.01000000",
                "side": "sell"
            }]
    """
    # Bug fix: `before`, `after`, and `limit` were previously accepted but
    # never forwarded, so the documented pagination filters had no effect.
    params = {}
    if before:
        params['before'] = before
    if after:
        params['after'] = after
    if limit is not None:
        params['limit'] = limit
    return self._send_paginated_message(
        '/products/{}/trades'.format(product_id), params=params)
"def",
"get_product_trades",
"(",
"self",
",",
"product_id",
",",
"before",
"=",
"''",
",",
"after",
"=",
"''",
",",
"limit",
"=",
"None",
",",
"result",
"=",
"None",
")",
":",
"return",
"self",
".",
"_send_paginated_message",
"(",
"'/products/{}/trades'",
... | List the latest trades for a product.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
product_id (str): Product
before (Optional[str]): start time in ISO 8601
after (Optional[str]): end time in ISO 8601
limit (Optional[int]): the desired number of trades (can be more than 100,
automatically paginated)
results (Optional[list]): list of results that is used for the pagination
Returns:
list: Latest trades. Example::
[{
"time": "2014-11-07T22:19:28.578544Z",
"trade_id": 74,
"price": "10.00000000",
"size": "0.01000000",
"side": "buy"
}, {
"time": "2014-11-07T01:08:43.642366Z",
"trade_id": 73,
"price": "100.00000000",
"size": "0.01000000",
"side": "sell"
}] | [
"List",
"the",
"latest",
"trades",
"for",
"a",
"product",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L117-L147 | train | 216,113 |
def get_product_historic_rates(self, product_id, start=None, end=None,
                               granularity=None):
    """Historic rates for a product.

    Rates are returned in grouped buckets based on requested
    `granularity`. If start, end, and granularity aren't provided,
    the exchange will assume some (currently unknown) default values.

    Historical rate data may be incomplete. No data is published for
    intervals where there are no ticks.

    **Caution**: Historical rates should not be polled frequently.
    If you need real-time information, use the trade and book
    endpoints along with the websocket feed.

    The maximum number of data points for a single request is 200
    candles. If your selection of start/end time and granularity
    will result in more than 200 data points, your request will be
    rejected. If you wish to retrieve fine granularity data over a
    larger time range, you will need to make multiple requests with
    new start/end ranges.

    Args:
        product_id (str): Product
        start (Optional[str]): Start time in ISO 8601
        end (Optional[str]): End time in ISO 8601
        granularity (Optional[int]): Desired time slice in seconds

    Returns:
        list: Historic candle data. Example:
            [
                [ time, low, high, open, close, volume ],
                [ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
                ...
            ]

    """
    # Only include the optional time bounds the caller actually supplied.
    params = {key: value
              for key, value in (('start', start), ('end', end))
              if value is not None}
    if granularity is not None:
        allowed = [60, 300, 900, 3600, 21600, 86400]
        if granularity not in allowed:
            raise ValueError(
                'Specified granularity is {}, must be in approved values: {}'.format(
                    granularity, allowed))
        params['granularity'] = granularity
    return self._send_message(
        'get', '/products/{}/candles'.format(product_id), params=params)
"def",
"get_product_historic_rates",
"(",
"self",
",",
"product_id",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"granularity",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"start",
"is",
"not",
"None",
":",
"params",
"[",
"'start... | Historic rates for a product.
Rates are returned in grouped buckets based on requested
`granularity`. If start, end, and granularity aren't provided,
the exchange will assume some (currently unknown) default values.
Historical rate data may be incomplete. No data is published for
intervals where there are no ticks.
**Caution**: Historical rates should not be polled frequently.
If you need real-time information, use the trade and book
endpoints along with the websocket feed.
The maximum number of data points for a single request is 200
candles. If your selection of start/end time and granularity
will result in more than 200 data points, your request will be
rejected. If you wish to retrieve fine granularity data over a
larger time range, you will need to make multiple requests with
new start/end ranges.
Args:
product_id (str): Product
start (Optional[str]): Start time in ISO 8601
end (Optional[str]): End time in ISO 8601
granularity (Optional[int]): Desired time slice in seconds
Returns:
list: Historic candle data. Example:
[
[ time, low, high, open, close, volume ],
[ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
...
] | [
"Historic",
"rates",
"for",
"a",
"product",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L149-L200 | train | 216,114 |
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient._send_message | def _send_message(self, method, endpoint, params=None, data=None):
"""Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response
"""
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json() | python | def _send_message(self, method, endpoint, params=None, data=None):
"""Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response
"""
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json() | [
"def",
"_send_message",
"(",
"self",
",",
"method",
",",
"endpoint",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"url",
"+",
"endpoint",
"r",
"=",
"self",
".",
"session",
".",
"request",
"(",
"method",
"... | Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response | [
"Send",
"API",
"request",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L254-L270 | train | 216,115 |
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient._send_paginated_message | def _send_paginated_message(self, endpoint, params=None):
""" Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects
"""
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after'] | python | def _send_paginated_message(self, endpoint, params=None):
""" Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects
"""
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after'] | [
"def",
"_send_paginated_message",
"(",
"self",
",",
"endpoint",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"dict",
"(",
")",
"url",
"=",
"self",
".",
"url",
"+",
"endpoint",
"while",
"True",
":",
"r",
"=",... | Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects | [
"Send",
"API",
"message",
"that",
"results",
"in",
"a",
"paginated",
"response",
"."
] | 0a9dbd86a25ae266d0e0eefeb112368c284b7dcc | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L272-L311 | train | 216,116 |
def check_cv(cv=3, y=None, classifier=False):
    """Dask aware version of ``sklearn.model_selection.check_cv``

    Same as the scikit-learn version, but works if ``y`` is a dask object.
    """
    cv = 3 if cv is None else cv
    # Scikit-learn only inspects ``y`` when ``cv`` is an integer, so in
    # every other case a dask ``y`` can be handed straight through.
    if not is_dask_collection(y) or not isinstance(cv, numbers.Integral):
        return model_selection.check_cv(cv, y, classifier)

    if classifier:
        # ``y`` is a dask object; materialize only its target type to
        # choose between stratified and plain K-fold splitting.
        target_type = delayed(type_of_target, pure=True)(y).compute()
        if target_type in ("binary", "multiclass"):
            return StratifiedKFold(cv)
    return KFold(cv)
"def",
"check_cv",
"(",
"cv",
"=",
"3",
",",
"y",
"=",
"None",
",",
"classifier",
"=",
"False",
")",
":",
"if",
"cv",
"is",
"None",
":",
"cv",
"=",
"3",
"# If ``cv`` is not an integer, the scikit-learn implementation doesn't",
"# touch the ``y`` object, so passing o... | Dask aware version of ``sklearn.model_selection.check_cv``
Same as the scikit-learn version, but works if ``y`` is a dask object. | [
"Dask",
"aware",
"version",
"of",
"sklearn",
".",
"model_selection",
".",
"check_cv"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L900-L918 | train | 216,117 |
def compute_n_splits(cv, X, y=None, groups=None):
    """Return the number of splits.

    Parameters
    ----------
    cv : BaseCrossValidator
    X, y, groups : array_like, dask object, or None

    Returns
    -------
    n_splits : int
    """
    x_is_dask = is_dask_collection(X)
    groups_is_dask = is_dask_collection(groups)

    # All concrete inputs: defer entirely to scikit-learn.
    if not (x_is_dask or is_dask_collection(y) or groups_is_dask):
        return cv.get_n_splits(X, y, groups)

    # For these splitter families the split count is known without
    # touching the (possibly lazy) data.
    if isinstance(cv, (_BaseKFold, BaseShuffleSplit)):
        return cv.n_splits
    if isinstance(cv, PredefinedSplit):
        return len(cv.unique_folds)
    if isinstance(cv, _CVIterableWrapper):
        return len(cv.cv)
    if isinstance(cv, (LeaveOneOut, LeavePOut)) and not x_is_dask:
        # Only `X` is referenced for these classes
        return cv.get_n_splits(X, None, None)
    if isinstance(cv, (LeaveOneGroupOut, LeavePGroupsOut)) and not groups_is_dask:
        # Only `groups` is referenced for these classes
        return cv.get_n_splits(None, None, groups)

    # Fall back to computing the answer through the dask graph.
    return delayed(cv).get_n_splits(X, y, groups).compute()
"def",
"compute_n_splits",
"(",
"cv",
",",
"X",
",",
"y",
"=",
"None",
",",
"groups",
"=",
"None",
")",
":",
"if",
"not",
"any",
"(",
"is_dask_collection",
"(",
"i",
")",
"for",
"i",
"in",
"(",
"X",
",",
"y",
",",
"groups",
")",
")",
":",
"retu... | Return the number of splits.
Parameters
----------
cv : BaseCrossValidator
X, y, groups : array_like, dask object, or None
Returns
-------
n_splits : int | [
"Return",
"the",
"number",
"of",
"splits",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L921-L956 | train | 216,118 |
def visualize(self, filename="mydask", format=None, **kwargs):
    """Render the task graph for this parameter search using ``graphviz``.

    Requires ``graphviz`` to be installed.

    Parameters
    ----------
    filename : str or None, optional
        The name (without an extension) of the file to write to disk. If
        `filename` is None, no file will be written, and we communicate
        with dot using only pipes.
    format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
        Format in which to write output file. Default is 'png'.
    **kwargs
        Additional keyword arguments to forward to
        ``dask.dot.to_graphviz``.

    Returns
    -------
    result : IPython.diplay.Image, IPython.display.SVG, or None
        See ``dask.dot.dot_graph`` for more information.
    """
    # The graph only exists after ``fit`` has run.
    check_is_fitted(self, "dask_graph_")
    return dask.visualize(self.dask_graph_, filename=filename,
                          format=format, **kwargs)
"def",
"visualize",
"(",
"self",
",",
"filename",
"=",
"\"mydask\"",
",",
"format",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"dask_graph_\"",
")",
"return",
"dask",
".",
"visualize",
"(",
"self",
".",
"dask_g... | Render the task graph for this parameter search using ``graphviz``.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
**kwargs
Additional keyword arguments to forward to
``dask.dot.to_graphviz``.
Returns
-------
result : IPython.diplay.Image, IPython.display.SVG, or None
See ``dask.dot.dot_graph`` for more information. | [
"Render",
"the",
"task",
"graph",
"for",
"this",
"parameter",
"search",
"using",
"graphviz",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L975-L1000 | train | 216,119 |
def fit(self, X, y=None, groups=None, **fit_params):
    """Run fit with all sets of parameters.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape = [n_samples] or [n_samples, n_output], optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    groups : array-like, shape = [n_samples], optional
        Group labels for the samples used while splitting the dataset into
        train/test set.
    **fit_params
        Parameters passed to the ``fit`` method of the estimator
    """
    estimator = self.estimator
    from sklearn.metrics.scorer import _check_multimetric_scoring

    scorer, multimetric = _check_multimetric_scoring(
        estimator, scoring=self.scoring
    )
    if not multimetric:
        scorer = scorer["score"]
    self.multimetric_ = multimetric

    # With multiple metrics, ``refit`` must name the metric used to pick
    # the winning parameter set (or be False to skip refitting).
    if multimetric and self.refit is not False:
        # The membership test works for both dict / list (tuple)
        if not isinstance(self.refit, str) or self.refit not in scorer:
            raise ValueError(
                "For multi-metric scoring, the parameter "
                "refit must be set to a scorer key "
                "to refit an estimator with the best "
                "parameter setting on the whole data and "
                "make the best_* attributes "
                "available for that metric. If this is "
                "not needed, refit should be set to "
                "False explicitly. %r was ."
                "passed." % self.refit
            )
    self.scorer_ = scorer

    error_score = self.error_score
    if not (isinstance(error_score, numbers.Number) or error_score == "raise"):
        raise ValueError(
            "error_score must be the string 'raise' or a" " numeric value."
        )

    # Build one task graph covering every (parameter, fold) fit plus the
    # score aggregation and (optionally) the final refit.
    graph, keys, n_splits = build_graph(
        estimator,
        self.cv,
        self.scorer_,
        list(self._get_param_iterator()),
        X,
        y,
        groups,
        fit_params,
        iid=self.iid,
        refit=self.refit,
        error_score=error_score,
        return_train_score=self.return_train_score,
        cache_cv=self.cache_cv,
        multimetric=multimetric,
    )
    self.dask_graph_ = graph
    self.n_splits_ = n_splits

    n_jobs = _normalize_n_jobs(self.n_jobs)
    scheduler = dask.base.get_scheduler(scheduler=self.scheduler)
    if not scheduler:
        scheduler = dask.threaded.get
    if scheduler is dask.threaded.get and n_jobs == 1:
        # Skip thread-pool overhead when only one worker is requested.
        scheduler = dask.local.get_sync

    out = scheduler(graph, keys, num_workers=n_jobs)

    results = handle_deprecated_train_score(out[0], self.return_train_score)
    self.cv_results_ = results

    if self.refit:
        key = self.refit if self.multimetric_ else "score"
        self.best_index_ = np.flatnonzero(
            results["rank_test_{}".format(key)] == 1
        )[0]
        self.best_estimator_ = out[1]
    return self
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"groups",
"=",
"None",
",",
"*",
"*",
"fit_params",
")",
":",
"estimator",
"=",
"self",
".",
"estimator",
"from",
"sklearn",
".",
"metrics",
".",
"scorer",
"import",
"_check_multimetric_s... | Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, shape = [n_samples], optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params
Parameters passed to the ``fit`` method of the estimator | [
"Run",
"fit",
"with",
"all",
"sets",
"of",
"parameters",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L1117-L1212 | train | 216,120 |
dask/dask-ml | dask_ml/model_selection/_search.py | RandomizedSearchCV._get_param_iterator | def _get_param_iterator(self):
"""Return ParameterSampler instance for the given distributions"""
return model_selection.ParameterSampler(
self.param_distributions, self.n_iter, random_state=self.random_state
) | python | def _get_param_iterator(self):
"""Return ParameterSampler instance for the given distributions"""
return model_selection.ParameterSampler(
self.param_distributions, self.n_iter, random_state=self.random_state
) | [
"def",
"_get_param_iterator",
"(",
"self",
")",
":",
"return",
"model_selection",
".",
"ParameterSampler",
"(",
"self",
".",
"param_distributions",
",",
"self",
".",
"n_iter",
",",
"random_state",
"=",
"self",
".",
"random_state",
")"
] | Return ParameterSampler instance for the given distributions | [
"Return",
"ParameterSampler",
"instance",
"for",
"the",
"given",
"distributions"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L1605-L1609 | train | 216,121 |
dask/dask-ml | dask_ml/model_selection/_incremental.py | _partial_fit | def _partial_fit(model_and_meta, X, y, fit_params):
"""
Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round.
"""
with log_errors():
start = time()
model, meta = model_and_meta
if len(X):
model = deepcopy(model)
model.partial_fit(X, y, **(fit_params or {}))
meta = dict(meta)
meta["partial_fit_calls"] += 1
meta["partial_fit_time"] = time() - start
return model, meta | python | def _partial_fit(model_and_meta, X, y, fit_params):
"""
Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round.
"""
with log_errors():
start = time()
model, meta = model_and_meta
if len(X):
model = deepcopy(model)
model.partial_fit(X, y, **(fit_params or {}))
meta = dict(meta)
meta["partial_fit_calls"] += 1
meta["partial_fit_time"] = time() - start
return model, meta | [
"def",
"_partial_fit",
"(",
"model_and_meta",
",",
"X",
",",
"y",
",",
"fit_params",
")",
":",
"with",
"log_errors",
"(",
")",
":",
"start",
"=",
"time",
"(",
")",
"model",
",",
"meta",
"=",
"model_and_meta",
"if",
"len",
"(",
"X",
")",
":",
"model",... | Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round. | [
"Call",
"partial_fit",
"on",
"a",
"classifiers",
"with",
"training",
"data",
"X",
"and",
"y"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_incremental.py#L30-L86 | train | 216,122 |
dask/dask-ml | dask_ml/model_selection/_incremental.py | _create_model | def _create_model(model, ident, **params):
""" Create a model by cloning and then setting params """
with log_errors(pdb=True):
model = clone(model).set_params(**params)
return model, {"model_id": ident, "params": params, "partial_fit_calls": 0} | python | def _create_model(model, ident, **params):
""" Create a model by cloning and then setting params """
with log_errors(pdb=True):
model = clone(model).set_params(**params)
return model, {"model_id": ident, "params": params, "partial_fit_calls": 0} | [
"def",
"_create_model",
"(",
"model",
",",
"ident",
",",
"*",
"*",
"params",
")",
":",
"with",
"log_errors",
"(",
"pdb",
"=",
"True",
")",
":",
"model",
"=",
"clone",
"(",
"model",
")",
".",
"set_params",
"(",
"*",
"*",
"params",
")",
"return",
"mo... | Create a model by cloning and then setting params | [
"Create",
"a",
"model",
"by",
"cloning",
"and",
"then",
"setting",
"params"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_incremental.py#L102-L106 | train | 216,123 |
dask/dask-ml | dask_ml/model_selection/_incremental.py | fit | def fit(
model,
params,
X_train,
y_train,
X_test,
y_test,
additional_calls,
fit_params=None,
scorer=None,
random_state=None,
):
""" Find a good model and search among a space of hyper-parameters
This does a hyper-parameter search by creating many models and then fitting
them incrementally on batches of data and reducing the number of models based
on the scores computed during training. Over time fewer and fewer models
remain. We train these models for increasingly long times.
The model, number of starting parameters, and decay can all be provided as
configuration parameters.
Training data should be given as Dask arrays. It can be large. Testing
data should be given either as a small dask array or as a numpy array. It
should fit on a single worker.
Parameters
----------
model : Estimator
params : List[Dict]
Parameters to start training on model
X_train : dask Array
y_train : dask Array
X_test : Array
Numpy array or small dask array. Should fit in single node's memory.
y_test : Array
Numpy array or small dask array. Should fit in single node's memory.
additional_calls : callable
A function that takes information about scoring history per model and
returns the number of additional partial fit calls to run on each model
fit_params : dict
Extra parameters to give to partial_fit
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> import numpy as np
>>> from dask_ml.datasets import make_classification
>>> X, y = make_classification(n_samples=5000000, n_features=20,
... chunks=100000, random_state=0)
>>> from sklearn.linear_model import SGDClassifier
>>> model = SGDClassifier(tol=1e-3, penalty='elasticnet', random_state=0)
>>> from sklearn.model_selection import ParameterSampler
>>> params = {'alpha': np.logspace(-2, 1, num=1000),
... 'l1_ratio': np.linspace(0, 1, num=1000),
... 'average': [True, False]}
>>> params = list(ParameterSampler(params, 10, random_state=0))
>>> X_test, y_test = X[:100000], y[:100000]
>>> X_train = X[100000:]
>>> y_train = y[100000:]
>>> def remove_worst(scores):
... last_score = {model_id: info[-1]['score']
... for model_id, info in scores.items()}
... worst_score = min(last_score.values())
... out = {}
... for model_id, score in last_score.items():
... if score != worst_score:
... out[model_id] = 1 # do one more training step
... if len(out) == 1:
... out = {k: 0 for k in out} # no more work to do, stops execution
... return out
>>> from dask.distributed import Client
>>> client = Client(processes=False)
>>> from dask_ml.model_selection._incremental import fit
>>> info, models, history, best = fit(model, params,
... X_train, y_train,
... X_test, y_test,
... additional_calls=remove_worst,
... fit_params={'classes': [0, 1]},
... random_state=0)
>>> models
{2: <Future: status: finished, type: SGDClassifier, key: ...}
>>> models[2].result()
SGDClassifier(...)
>>> info[2][-1] # doctest: +SKIP
{'model_id': 2,
'params': {'l1_ratio': 0.9529529529529529, 'average': False,
'alpha': 0.014933932161242525},
'partial_fit_calls': 8,
'partial_fit_time': 0.17334818840026855,
'score': 0.58765,
'score_time': 0.031442880630493164}
Returns
-------
info : Dict[int, List[Dict]]
Scoring history of each successful model, keyed by model ID.
This has the parameters, scores, and timing information over time
models : Dict[int, Future]
Dask futures pointing to trained models
history : List[Dict]
A history of all models scores over time
"""
return default_client().sync(
_fit,
model,
params,
X_train,
y_train,
X_test,
y_test,
additional_calls,
fit_params=fit_params,
scorer=scorer,
random_state=random_state,
) | python | def fit(
model,
params,
X_train,
y_train,
X_test,
y_test,
additional_calls,
fit_params=None,
scorer=None,
random_state=None,
):
""" Find a good model and search among a space of hyper-parameters
This does a hyper-parameter search by creating many models and then fitting
them incrementally on batches of data and reducing the number of models based
on the scores computed during training. Over time fewer and fewer models
remain. We train these models for increasingly long times.
The model, number of starting parameters, and decay can all be provided as
configuration parameters.
Training data should be given as Dask arrays. It can be large. Testing
data should be given either as a small dask array or as a numpy array. It
should fit on a single worker.
Parameters
----------
model : Estimator
params : List[Dict]
Parameters to start training on model
X_train : dask Array
y_train : dask Array
X_test : Array
Numpy array or small dask array. Should fit in single node's memory.
y_test : Array
Numpy array or small dask array. Should fit in single node's memory.
additional_calls : callable
A function that takes information about scoring history per model and
returns the number of additional partial fit calls to run on each model
fit_params : dict
Extra parameters to give to partial_fit
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> import numpy as np
>>> from dask_ml.datasets import make_classification
>>> X, y = make_classification(n_samples=5000000, n_features=20,
... chunks=100000, random_state=0)
>>> from sklearn.linear_model import SGDClassifier
>>> model = SGDClassifier(tol=1e-3, penalty='elasticnet', random_state=0)
>>> from sklearn.model_selection import ParameterSampler
>>> params = {'alpha': np.logspace(-2, 1, num=1000),
... 'l1_ratio': np.linspace(0, 1, num=1000),
... 'average': [True, False]}
>>> params = list(ParameterSampler(params, 10, random_state=0))
>>> X_test, y_test = X[:100000], y[:100000]
>>> X_train = X[100000:]
>>> y_train = y[100000:]
>>> def remove_worst(scores):
... last_score = {model_id: info[-1]['score']
... for model_id, info in scores.items()}
... worst_score = min(last_score.values())
... out = {}
... for model_id, score in last_score.items():
... if score != worst_score:
... out[model_id] = 1 # do one more training step
... if len(out) == 1:
... out = {k: 0 for k in out} # no more work to do, stops execution
... return out
>>> from dask.distributed import Client
>>> client = Client(processes=False)
>>> from dask_ml.model_selection._incremental import fit
>>> info, models, history, best = fit(model, params,
... X_train, y_train,
... X_test, y_test,
... additional_calls=remove_worst,
... fit_params={'classes': [0, 1]},
... random_state=0)
>>> models
{2: <Future: status: finished, type: SGDClassifier, key: ...}
>>> models[2].result()
SGDClassifier(...)
>>> info[2][-1] # doctest: +SKIP
{'model_id': 2,
'params': {'l1_ratio': 0.9529529529529529, 'average': False,
'alpha': 0.014933932161242525},
'partial_fit_calls': 8,
'partial_fit_time': 0.17334818840026855,
'score': 0.58765,
'score_time': 0.031442880630493164}
Returns
-------
info : Dict[int, List[Dict]]
Scoring history of each successful model, keyed by model ID.
This has the parameters, scores, and timing information over time
models : Dict[int, Future]
Dask futures pointing to trained models
history : List[Dict]
A history of all models scores over time
"""
return default_client().sync(
_fit,
model,
params,
X_train,
y_train,
X_test,
y_test,
additional_calls,
fit_params=fit_params,
scorer=scorer,
random_state=random_state,
) | [
"def",
"fit",
"(",
"model",
",",
"params",
",",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"additional_calls",
",",
"fit_params",
"=",
"None",
",",
"scorer",
"=",
"None",
",",
"random_state",
"=",
"None",
",",
")",
":",
"return",
"de... | Find a good model and search among a space of hyper-parameters
This does a hyper-parameter search by creating many models and then fitting
them incrementally on batches of data and reducing the number of models based
on the scores computed during training. Over time fewer and fewer models
remain. We train these models for increasingly long times.
The model, number of starting parameters, and decay can all be provided as
configuration parameters.
Training data should be given as Dask arrays. It can be large. Testing
data should be given either as a small dask array or as a numpy array. It
should fit on a single worker.
Parameters
----------
model : Estimator
params : List[Dict]
Parameters to start training on model
X_train : dask Array
y_train : dask Array
X_test : Array
Numpy array or small dask array. Should fit in single node's memory.
y_test : Array
Numpy array or small dask array. Should fit in single node's memory.
additional_calls : callable
A function that takes information about scoring history per model and
returns the number of additional partial fit calls to run on each model
fit_params : dict
Extra parameters to give to partial_fit
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> import numpy as np
>>> from dask_ml.datasets import make_classification
>>> X, y = make_classification(n_samples=5000000, n_features=20,
... chunks=100000, random_state=0)
>>> from sklearn.linear_model import SGDClassifier
>>> model = SGDClassifier(tol=1e-3, penalty='elasticnet', random_state=0)
>>> from sklearn.model_selection import ParameterSampler
>>> params = {'alpha': np.logspace(-2, 1, num=1000),
... 'l1_ratio': np.linspace(0, 1, num=1000),
... 'average': [True, False]}
>>> params = list(ParameterSampler(params, 10, random_state=0))
>>> X_test, y_test = X[:100000], y[:100000]
>>> X_train = X[100000:]
>>> y_train = y[100000:]
>>> def remove_worst(scores):
... last_score = {model_id: info[-1]['score']
... for model_id, info in scores.items()}
... worst_score = min(last_score.values())
... out = {}
... for model_id, score in last_score.items():
... if score != worst_score:
... out[model_id] = 1 # do one more training step
... if len(out) == 1:
... out = {k: 0 for k in out} # no more work to do, stops execution
... return out
>>> from dask.distributed import Client
>>> client = Client(processes=False)
>>> from dask_ml.model_selection._incremental import fit
>>> info, models, history, best = fit(model, params,
... X_train, y_train,
... X_test, y_test,
... additional_calls=remove_worst,
... fit_params={'classes': [0, 1]},
... random_state=0)
>>> models
{2: <Future: status: finished, type: SGDClassifier, key: ...}
>>> models[2].result()
SGDClassifier(...)
>>> info[2][-1] # doctest: +SKIP
{'model_id': 2,
'params': {'l1_ratio': 0.9529529529529529, 'average': False,
'alpha': 0.014933932161242525},
'partial_fit_calls': 8,
'partial_fit_time': 0.17334818840026855,
'score': 0.58765,
'score_time': 0.031442880630493164}
Returns
-------
info : Dict[int, List[Dict]]
Scoring history of each successful model, keyed by model ID.
This has the parameters, scores, and timing information over time
models : Dict[int, Future]
Dask futures pointing to trained models
history : List[Dict]
A history of all models scores over time | [
"Find",
"a",
"good",
"model",
"and",
"search",
"among",
"a",
"space",
"of",
"hyper",
"-",
"parameters"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_incremental.py#L273-L402 | train | 216,124 |
dask/dask-ml | dask_ml/model_selection/_incremental.py | BaseIncrementalSearchCV._check_array | def _check_array(self, X, **kwargs):
"""Validate the data arguments X and y.
By default, NumPy arrays are converted to 1-block dask arrays.
Parameters
----------
X, y : array-like
"""
if isinstance(X, np.ndarray):
X = da.from_array(X, X.shape)
X = check_array(X, **kwargs)
return X | python | def _check_array(self, X, **kwargs):
"""Validate the data arguments X and y.
By default, NumPy arrays are converted to 1-block dask arrays.
Parameters
----------
X, y : array-like
"""
if isinstance(X, np.ndarray):
X = da.from_array(X, X.shape)
X = check_array(X, **kwargs)
return X | [
"def",
"_check_array",
"(",
"self",
",",
"X",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"X",
"=",
"da",
".",
"from_array",
"(",
"X",
",",
"X",
".",
"shape",
")",
"X",
"=",
"check_array... | Validate the data arguments X and y.
By default, NumPy arrays are converted to 1-block dask arrays.
Parameters
----------
X, y : array-like | [
"Validate",
"the",
"data",
"arguments",
"X",
"and",
"y",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_incremental.py#L432-L444 | train | 216,125 |
dask/dask-ml | dask_ml/model_selection/_incremental.py | BaseIncrementalSearchCV.fit | def fit(self, X, y, **fit_params):
"""Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator.
"""
return default_client().sync(self._fit, X, y, **fit_params) | python | def fit(self, X, y, **fit_params):
"""Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator.
"""
return default_client().sync(self._fit, X, y, **fit_params) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"*",
"*",
"fit_params",
")",
":",
"return",
"default_client",
"(",
")",
".",
"sync",
"(",
"self",
".",
"_fit",
",",
"X",
",",
"y",
",",
"*",
"*",
"fit_params",
")"
] | Find the best parameters for a particular model.
Parameters
----------
X, y : array-like
**fit_params
Additional partial fit keyword arguments for the estimator. | [
"Find",
"the",
"best",
"parameters",
"for",
"a",
"particular",
"model",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_incremental.py#L569-L578 | train | 216,126 |
dask/dask-ml | dask_ml/wrappers.py | ParallelPostFit._check_array | def _check_array(self, X):
"""Validate an array for post-fit tasks.
Parameters
----------
X : Union[Array, DataFrame]
Returns
-------
same type as 'X'
Notes
-----
The following checks are applied.
- Ensure that the array is blocked only along the samples.
"""
if isinstance(X, da.Array):
if X.ndim == 2 and X.numblocks[1] > 1:
logger.debug("auto-rechunking 'X'")
if not np.isnan(X.chunks[0]).any():
X = X.rechunk({0: "auto", 1: -1})
else:
X = X.rechunk({1: -1})
return X | python | def _check_array(self, X):
"""Validate an array for post-fit tasks.
Parameters
----------
X : Union[Array, DataFrame]
Returns
-------
same type as 'X'
Notes
-----
The following checks are applied.
- Ensure that the array is blocked only along the samples.
"""
if isinstance(X, da.Array):
if X.ndim == 2 and X.numblocks[1] > 1:
logger.debug("auto-rechunking 'X'")
if not np.isnan(X.chunks[0]).any():
X = X.rechunk({0: "auto", 1: -1})
else:
X = X.rechunk({1: -1})
return X | [
"def",
"_check_array",
"(",
"self",
",",
"X",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"da",
".",
"Array",
")",
":",
"if",
"X",
".",
"ndim",
"==",
"2",
"and",
"X",
".",
"numblocks",
"[",
"1",
"]",
">",
"1",
":",
"logger",
".",
"debug",
"(... | Validate an array for post-fit tasks.
Parameters
----------
X : Union[Array, DataFrame]
Returns
-------
same type as 'X'
Notes
-----
The following checks are applied.
- Ensure that the array is blocked only along the samples. | [
"Validate",
"an",
"array",
"for",
"post",
"-",
"fit",
"tasks",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/wrappers.py#L122-L146 | train | 216,127 |
dask/dask-ml | dask_ml/wrappers.py | ParallelPostFit.transform | def transform(self, X):
"""Transform block or partition-wise for dask inputs.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``transform`` method, then
an ``AttributeError`` is raised.
Parameters
----------
X : array-like
Returns
-------
transformed : array-like
"""
self._check_method("transform")
X = self._check_array(X)
if isinstance(X, da.Array):
return X.map_blocks(_transform, estimator=self._postfit_estimator)
elif isinstance(X, dd._Frame):
return X.map_partitions(_transform, estimator=self._postfit_estimator)
else:
return _transform(X, estimator=self._postfit_estimator) | python | def transform(self, X):
"""Transform block or partition-wise for dask inputs.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``transform`` method, then
an ``AttributeError`` is raised.
Parameters
----------
X : array-like
Returns
-------
transformed : array-like
"""
self._check_method("transform")
X = self._check_array(X)
if isinstance(X, da.Array):
return X.map_blocks(_transform, estimator=self._postfit_estimator)
elif isinstance(X, dd._Frame):
return X.map_partitions(_transform, estimator=self._postfit_estimator)
else:
return _transform(X, estimator=self._postfit_estimator) | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"_check_method",
"(",
"\"transform\"",
")",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"if",
"isinstance",
"(",
"X",
",",
"da",
".",
"Array",
")",
":",
"return",
"X",
".",... | Transform block or partition-wise for dask inputs.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``transform`` method, then
an ``AttributeError`` is raised.
Parameters
----------
X : array-like
Returns
-------
transformed : array-like | [
"Transform",
"block",
"or",
"partition",
"-",
"wise",
"for",
"dask",
"inputs",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/wrappers.py#L185-L211 | train | 216,128 |
dask/dask-ml | dask_ml/wrappers.py | ParallelPostFit.score | def score(self, X, y, compute=True):
"""Returns the score on the given data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
return self.estimator.score(X, y)
"""
scoring = self.scoring
X = self._check_array(X)
y = self._check_array(y)
if not scoring:
if type(self._postfit_estimator).score == sklearn.base.RegressorMixin.score:
scoring = "r2"
elif (
type(self._postfit_estimator).score
== sklearn.base.ClassifierMixin.score
):
scoring = "accuracy"
else:
scoring = self.scoring
if scoring:
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
scorer = sklearn.metrics.get_scorer(scoring)
else:
scorer = get_scorer(scoring, compute=compute)
return scorer(self, X, y)
else:
return self._postfit_estimator.score(X, y) | python | def score(self, X, y, compute=True):
"""Returns the score on the given data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
return self.estimator.score(X, y)
"""
scoring = self.scoring
X = self._check_array(X)
y = self._check_array(y)
if not scoring:
if type(self._postfit_estimator).score == sklearn.base.RegressorMixin.score:
scoring = "r2"
elif (
type(self._postfit_estimator).score
== sklearn.base.ClassifierMixin.score
):
scoring = "accuracy"
else:
scoring = self.scoring
if scoring:
if not dask.is_dask_collection(X) and not dask.is_dask_collection(y):
scorer = sklearn.metrics.get_scorer(scoring)
else:
scorer = get_scorer(scoring, compute=compute)
return scorer(self, X, y)
else:
return self._postfit_estimator.score(X, y) | [
"def",
"score",
"(",
"self",
",",
"X",
",",
"y",
",",
"compute",
"=",
"True",
")",
":",
"scoring",
"=",
"self",
".",
"scoring",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"y",
"=",
"self",
".",
"_check_array",
"(",
"y",
")",
"if",
"n... | Returns the score on the given data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
return self.estimator.score(X, y) | [
"Returns",
"the",
"score",
"on",
"the",
"given",
"data",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/wrappers.py#L213-L253 | train | 216,129 |
dask/dask-ml | dask_ml/wrappers.py | ParallelPostFit.predict | def predict(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like
"""
self._check_method("predict")
X = self._check_array(X)
if isinstance(X, da.Array):
result = X.map_blocks(
_predict, dtype="int", estimator=self._postfit_estimator, drop_axis=1
)
return result
elif isinstance(X, dd._Frame):
return X.map_partitions(
_predict, estimator=self._postfit_estimator, meta=np.array([1])
)
else:
return _predict(X, estimator=self._postfit_estimator) | python | def predict(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like
"""
self._check_method("predict")
X = self._check_array(X)
if isinstance(X, da.Array):
result = X.map_blocks(
_predict, dtype="int", estimator=self._postfit_estimator, drop_axis=1
)
return result
elif isinstance(X, dd._Frame):
return X.map_partitions(
_predict, estimator=self._postfit_estimator, meta=np.array([1])
)
else:
return _predict(X, estimator=self._postfit_estimator) | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"_check_method",
"(",
"\"predict\"",
")",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"if",
"isinstance",
"(",
"X",
",",
"da",
".",
"Array",
")",
":",
"result",
"=",
"X",
"... | Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like | [
"Predict",
"for",
"X",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/wrappers.py#L255-L285 | train | 216,130 |
dask/dask-ml | dask_ml/wrappers.py | ParallelPostFit.predict_log_proba | def predict_log_proba(self, X):
"""Log of proability estimates.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like
"""
self._check_method("predict_log_proba")
return da.log(self.predict_proba(X)) | python | def predict_log_proba(self, X):
"""Log of proability estimates.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like
"""
self._check_method("predict_log_proba")
return da.log(self.predict_proba(X)) | [
"def",
"predict_log_proba",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"_check_method",
"(",
"\"predict_log_proba\"",
")",
"return",
"da",
".",
"log",
"(",
"self",
".",
"predict_proba",
"(",
"X",
")",
")"
] | Log of proability estimates.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like | [
"Log",
"of",
"proability",
"estimates",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/wrappers.py#L322-L341 | train | 216,131 |
dask/dask-ml | dask_ml/wrappers.py | ParallelPostFit._check_method | def _check_method(self, method):
"""Check if self.estimator has 'method'.
Raises
------
AttributeError
"""
estimator = self._postfit_estimator
if not hasattr(estimator, method):
msg = "The wrapped estimator '{}' does not have a '{}' method.".format(
estimator, method
)
raise AttributeError(msg)
return getattr(estimator, method) | python | def _check_method(self, method):
"""Check if self.estimator has 'method'.
Raises
------
AttributeError
"""
estimator = self._postfit_estimator
if not hasattr(estimator, method):
msg = "The wrapped estimator '{}' does not have a '{}' method.".format(
estimator, method
)
raise AttributeError(msg)
return getattr(estimator, method) | [
"def",
"_check_method",
"(",
"self",
",",
"method",
")",
":",
"estimator",
"=",
"self",
".",
"_postfit_estimator",
"if",
"not",
"hasattr",
"(",
"estimator",
",",
"method",
")",
":",
"msg",
"=",
"\"The wrapped estimator '{}' does not have a '{}' method.\"",
".",
"f... | Check if self.estimator has 'method'.
Raises
------
AttributeError | [
"Check",
"if",
"self",
".",
"estimator",
"has",
"method",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/wrappers.py#L343-L356 | train | 216,132 |
dask/dask-ml | dask_ml/linear_model/glm.py | _GLM.fit | def fit(self, X, y=None):
"""Fit the model on the training data
Parameters
----------
X: array-like, shape (n_samples, n_features)
y : array-like, shape (n_samples,)
Returns
-------
self : objectj
"""
X = self._check_array(X)
solver_kwargs = self._get_solver_kwargs()
self._coef = algorithms._solvers[self.solver](X, y, **solver_kwargs)
if self.fit_intercept:
self.coef_ = self._coef[:-1]
self.intercept_ = self._coef[-1]
else:
self.coef_ = self._coef
return self | python | def fit(self, X, y=None):
"""Fit the model on the training data
Parameters
----------
X: array-like, shape (n_samples, n_features)
y : array-like, shape (n_samples,)
Returns
-------
self : objectj
"""
X = self._check_array(X)
solver_kwargs = self._get_solver_kwargs()
self._coef = algorithms._solvers[self.solver](X, y, **solver_kwargs)
if self.fit_intercept:
self.coef_ = self._coef[:-1]
self.intercept_ = self._coef[-1]
else:
self.coef_ = self._coef
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"solver_kwargs",
"=",
"self",
".",
"_get_solver_kwargs",
"(",
")",
"self",
".",
"_coef",
"=",
"algorithms",
".",
"_solvers",
... | Fit the model on the training data
Parameters
----------
X: array-like, shape (n_samples, n_features)
y : array-like, shape (n_samples,)
Returns
-------
self : objectj | [
"Fit",
"the",
"model",
"on",
"the",
"training",
"data"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/linear_model/glm.py#L171-L193 | train | 216,133 |
dask/dask-ml | dask_ml/linear_model/glm.py | LogisticRegression.predict_proba | def predict_proba(self, X):
"""Probability estimates for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
The probability of the sample for each class in the model.
"""
X_ = self._check_array(X)
return sigmoid(dot(X_, self._coef)) | python | def predict_proba(self, X):
"""Probability estimates for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
The probability of the sample for each class in the model.
"""
X_ = self._check_array(X)
return sigmoid(dot(X_, self._coef)) | [
"def",
"predict_proba",
"(",
"self",
",",
"X",
")",
":",
"X_",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"return",
"sigmoid",
"(",
"dot",
"(",
"X_",
",",
"self",
".",
"_coef",
")",
")"
] | Probability estimates for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
The probability of the sample for each class in the model. | [
"Probability",
"estimates",
"for",
"samples",
"in",
"X",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/linear_model/glm.py#L235-L248 | train | 216,134 |
dask/dask-ml | dask_ml/linear_model/glm.py | PoissonRegression.predict | def predict(self, X):
"""Predict count for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted count for each sample
"""
X_ = self._check_array(X)
return exp(dot(X_, self._coef)) | python | def predict(self, X):
"""Predict count for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted count for each sample
"""
X_ = self._check_array(X)
return exp(dot(X_, self._coef)) | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"X_",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"return",
"exp",
"(",
"dot",
"(",
"X_",
",",
"self",
".",
"_coef",
")",
")"
] | Predict count for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted count for each sample | [
"Predict",
"count",
"for",
"samples",
"in",
"X",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/linear_model/glm.py#L348-L361 | train | 216,135 |
dask/dask-ml | dask_ml/cluster/k_means.py | k_means | def k_means(
X,
n_clusters,
init="k-means||",
precompute_distances="auto",
n_init=1,
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
n_jobs=-1,
algorithm="full",
return_n_iter=False,
oversampling_factor=2,
init_max_iter=None,
):
"""K-means algorithm for clustering
Differences from scikit-learn:
* init='k-means||'
* oversampling_factor keyword
* n_jobs=-1
"""
labels, inertia, centers, n_iter = _kmeans_single_lloyd(
X,
n_clusters,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
random_state=random_state,
oversampling_factor=oversampling_factor,
init_max_iter=init_max_iter,
)
if return_n_iter:
return labels, centers, inertia, n_iter
else:
return labels, centers, inertia | python | def k_means(
X,
n_clusters,
init="k-means||",
precompute_distances="auto",
n_init=1,
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
n_jobs=-1,
algorithm="full",
return_n_iter=False,
oversampling_factor=2,
init_max_iter=None,
):
"""K-means algorithm for clustering
Differences from scikit-learn:
* init='k-means||'
* oversampling_factor keyword
* n_jobs=-1
"""
labels, inertia, centers, n_iter = _kmeans_single_lloyd(
X,
n_clusters,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
random_state=random_state,
oversampling_factor=oversampling_factor,
init_max_iter=init_max_iter,
)
if return_n_iter:
return labels, centers, inertia, n_iter
else:
return labels, centers, inertia | [
"def",
"k_means",
"(",
"X",
",",
"n_clusters",
",",
"init",
"=",
"\"k-means||\"",
",",
"precompute_distances",
"=",
"\"auto\"",
",",
"n_init",
"=",
"1",
",",
"max_iter",
"=",
"300",
",",
"verbose",
"=",
"False",
",",
"tol",
"=",
"1e-4",
",",
"random_stat... | K-means algorithm for clustering
Differences from scikit-learn:
* init='k-means||'
* oversampling_factor keyword
* n_jobs=-1 | [
"K",
"-",
"means",
"algorithm",
"for",
"clustering"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L236-L275 | train | 216,136 |
dask/dask-ml | dask_ml/cluster/k_means.py | k_init | def k_init(
X,
n_clusters,
init="k-means||",
random_state=None,
max_iter=None,
oversampling_factor=2,
):
"""Choose the initial centers for K-Means.
Parameters
----------
X : da.Array (n_samples, n_features)
n_clusters : int
Number of clusters to end up with
init : {'k-means||', 'k-means++', 'random'} or numpy.ndarray
Initialization method, or pass a NumPy array to use
random_state : int, optional
max_iter : int, optional
Only used for ``init='k-means||'``.
oversampling_factor : int, optional
Only used for ``init='k-means||`''. Controls the additional number of
candidate centers in each iteration.
Return
------
centers : np.ndarray (n_clusters, n_features)
Notes
-----
The default strategy is ``k-means||``, which tends to be slower than
``k-means++`` for small (in-memory) datasets, but works better in a
distributed setting.
.. warning::
Using ``init='k-means++'`` assumes that the entire dataset fits
in RAM.
"""
if isinstance(init, np.ndarray):
K, P = init.shape
if K != n_clusters:
msg = (
"Number of centers in provided 'init' ({}) does "
"not match 'n_clusters' ({})"
)
raise ValueError(msg.format(K, n_clusters))
if P != X.shape[1]:
msg = (
"Number of features in the provided 'init' ({}) do not "
"match the number of features in 'X'"
)
raise ValueError(msg.format(P, X.shape[1]))
return init
elif not isinstance(init, str):
raise TypeError("'init' must be an array or str, got {}".format(type(init)))
valid = {"k-means||", "k-means++", "random"}
if isinstance(random_state, Integral) or random_state is None:
if init == "k-means||":
random_state = da.random.RandomState(random_state)
else:
random_state = np.random.RandomState(random_state)
if init == "k-means||":
return init_scalable(X, n_clusters, random_state, max_iter, oversampling_factor)
elif init == "k-means++":
return init_pp(X, n_clusters, random_state)
elif init == "random":
return init_random(X, n_clusters, random_state)
else:
raise ValueError("'init' must be one of {}, got {}".format(valid, init)) | python | def k_init(
X,
n_clusters,
init="k-means||",
random_state=None,
max_iter=None,
oversampling_factor=2,
):
"""Choose the initial centers for K-Means.
Parameters
----------
X : da.Array (n_samples, n_features)
n_clusters : int
Number of clusters to end up with
init : {'k-means||', 'k-means++', 'random'} or numpy.ndarray
Initialization method, or pass a NumPy array to use
random_state : int, optional
max_iter : int, optional
Only used for ``init='k-means||'``.
oversampling_factor : int, optional
Only used for ``init='k-means||`''. Controls the additional number of
candidate centers in each iteration.
Return
------
centers : np.ndarray (n_clusters, n_features)
Notes
-----
The default strategy is ``k-means||``, which tends to be slower than
``k-means++`` for small (in-memory) datasets, but works better in a
distributed setting.
.. warning::
Using ``init='k-means++'`` assumes that the entire dataset fits
in RAM.
"""
if isinstance(init, np.ndarray):
K, P = init.shape
if K != n_clusters:
msg = (
"Number of centers in provided 'init' ({}) does "
"not match 'n_clusters' ({})"
)
raise ValueError(msg.format(K, n_clusters))
if P != X.shape[1]:
msg = (
"Number of features in the provided 'init' ({}) do not "
"match the number of features in 'X'"
)
raise ValueError(msg.format(P, X.shape[1]))
return init
elif not isinstance(init, str):
raise TypeError("'init' must be an array or str, got {}".format(type(init)))
valid = {"k-means||", "k-means++", "random"}
if isinstance(random_state, Integral) or random_state is None:
if init == "k-means||":
random_state = da.random.RandomState(random_state)
else:
random_state = np.random.RandomState(random_state)
if init == "k-means||":
return init_scalable(X, n_clusters, random_state, max_iter, oversampling_factor)
elif init == "k-means++":
return init_pp(X, n_clusters, random_state)
elif init == "random":
return init_random(X, n_clusters, random_state)
else:
raise ValueError("'init' must be one of {}, got {}".format(valid, init)) | [
"def",
"k_init",
"(",
"X",
",",
"n_clusters",
",",
"init",
"=",
"\"k-means||\"",
",",
"random_state",
"=",
"None",
",",
"max_iter",
"=",
"None",
",",
"oversampling_factor",
"=",
"2",
",",
")",
":",
"if",
"isinstance",
"(",
"init",
",",
"np",
".",
"ndar... | Choose the initial centers for K-Means.
Parameters
----------
X : da.Array (n_samples, n_features)
n_clusters : int
Number of clusters to end up with
init : {'k-means||', 'k-means++', 'random'} or numpy.ndarray
Initialization method, or pass a NumPy array to use
random_state : int, optional
max_iter : int, optional
Only used for ``init='k-means||'``.
oversampling_factor : int, optional
Only used for ``init='k-means||`''. Controls the additional number of
candidate centers in each iteration.
Return
------
centers : np.ndarray (n_clusters, n_features)
Notes
-----
The default strategy is ``k-means||``, which tends to be slower than
``k-means++`` for small (in-memory) datasets, but works better in a
distributed setting.
.. warning::
Using ``init='k-means++'`` assumes that the entire dataset fits
in RAM. | [
"Choose",
"the",
"initial",
"centers",
"for",
"K",
"-",
"Means",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L291-L369 | train | 216,137 |
dask/dask-ml | dask_ml/cluster/k_means.py | init_pp | def init_pp(X, n_clusters, random_state):
"""K-means initialization using k-means++
This uses scikit-learn's implementation.
"""
x_squared_norms = row_norms(X, squared=True).compute()
logger.info("Initializing with k-means++")
with _timer("initialization of %2d centers" % n_clusters, _logger=logger):
centers = sk_k_means._k_init(
X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms
)
return centers | python | def init_pp(X, n_clusters, random_state):
"""K-means initialization using k-means++
This uses scikit-learn's implementation.
"""
x_squared_norms = row_norms(X, squared=True).compute()
logger.info("Initializing with k-means++")
with _timer("initialization of %2d centers" % n_clusters, _logger=logger):
centers = sk_k_means._k_init(
X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms
)
return centers | [
"def",
"init_pp",
"(",
"X",
",",
"n_clusters",
",",
"random_state",
")",
":",
"x_squared_norms",
"=",
"row_norms",
"(",
"X",
",",
"squared",
"=",
"True",
")",
".",
"compute",
"(",
")",
"logger",
".",
"info",
"(",
"\"Initializing with k-means++\"",
")",
"wi... | K-means initialization using k-means++
This uses scikit-learn's implementation. | [
"K",
"-",
"means",
"initialization",
"using",
"k",
"-",
"means",
"++"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L372-L384 | train | 216,138 |
dask/dask-ml | dask_ml/cluster/k_means.py | init_random | def init_random(X, n_clusters, random_state):
"""K-means initialization using randomly chosen points"""
logger.info("Initializing randomly")
idx = sorted(draw_seed(random_state, 0, len(X), size=n_clusters))
centers = X[idx].compute()
return centers | python | def init_random(X, n_clusters, random_state):
"""K-means initialization using randomly chosen points"""
logger.info("Initializing randomly")
idx = sorted(draw_seed(random_state, 0, len(X), size=n_clusters))
centers = X[idx].compute()
return centers | [
"def",
"init_random",
"(",
"X",
",",
"n_clusters",
",",
"random_state",
")",
":",
"logger",
".",
"info",
"(",
"\"Initializing randomly\"",
")",
"idx",
"=",
"sorted",
"(",
"draw_seed",
"(",
"random_state",
",",
"0",
",",
"len",
"(",
"X",
")",
",",
"size",... | K-means initialization using randomly chosen points | [
"K",
"-",
"means",
"initialization",
"using",
"randomly",
"chosen",
"points"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L388-L393 | train | 216,139 |
dask/dask-ml | dask_ml/cluster/k_means.py | init_scalable | def init_scalable(
X, n_clusters, random_state=None, max_iter=None, oversampling_factor=2
):
"""K-Means initialization using k-means||
This is algorithm 2 in Scalable K-Means++ (2012).
"""
logger.info("Initializing with k-means||")
# Step 1: Initialize Centers
idx = 0
centers = da.compute(X[idx, np.newaxis])[0]
c_idx = {idx}
# Step 2: Initialize cost
cost, = compute(evaluate_cost(X, centers))
if cost == 0:
n_iter = 0
else:
n_iter = int(np.round(np.log(cost)))
if max_iter is not None:
n_iter = min(max_iter, n_iter)
# Steps 3 - 6: update candidate Centers
for i in range(n_iter):
with _timer(
"init iteration %2d/%2d , %2d centers" % (i + 1, n_iter, len(c_idx)),
_logger=logger,
):
new_idxs = _sample_points(X, centers, oversampling_factor, random_state)
new_idxs = set(*compute(new_idxs))
c_idx |= new_idxs
# Sort before slicing, for better performance / memory
# usage with the scheduler.
# See https://github.com/dask/dask-ml/issues/39
centers = X[sorted(c_idx)].compute()
# XXX: scikit-learn doesn't have weighted k-means.
# The paper weights each center by the number of points closest to it.
# https://stackoverflow.com/a/37198799/1889400 claims you can scale the
# features before clustering, but that doesn't seem right.
# I think that replicating the *points*, proportional to the number of
# original points closest to the candidate centers, would be a better way
# to do that.
if len(centers) < n_clusters:
logger.warning("Found fewer than %d clusters in init.", n_clusters)
# supplement with random
need = n_clusters - len(centers)
locs = sorted(
random_state.choice(
np.arange(0, len(X)), size=need, replace=False, chunks=len(X)
)
)
extra = X[locs].compute()
return np.vstack([centers, extra])
else:
# Step 7, 8 without weights
# dask RandomState objects aren't valid for scikit-learn
rng2 = (
random_state.randint(0, np.iinfo("i4").max - 1, chunks=())
.compute(scheduler="single-threaded")
.item()
)
km = sk_k_means.KMeans(n_clusters, random_state=rng2)
km.fit(centers)
return km.cluster_centers_ | python | def init_scalable(
X, n_clusters, random_state=None, max_iter=None, oversampling_factor=2
):
"""K-Means initialization using k-means||
This is algorithm 2 in Scalable K-Means++ (2012).
"""
logger.info("Initializing with k-means||")
# Step 1: Initialize Centers
idx = 0
centers = da.compute(X[idx, np.newaxis])[0]
c_idx = {idx}
# Step 2: Initialize cost
cost, = compute(evaluate_cost(X, centers))
if cost == 0:
n_iter = 0
else:
n_iter = int(np.round(np.log(cost)))
if max_iter is not None:
n_iter = min(max_iter, n_iter)
# Steps 3 - 6: update candidate Centers
for i in range(n_iter):
with _timer(
"init iteration %2d/%2d , %2d centers" % (i + 1, n_iter, len(c_idx)),
_logger=logger,
):
new_idxs = _sample_points(X, centers, oversampling_factor, random_state)
new_idxs = set(*compute(new_idxs))
c_idx |= new_idxs
# Sort before slicing, for better performance / memory
# usage with the scheduler.
# See https://github.com/dask/dask-ml/issues/39
centers = X[sorted(c_idx)].compute()
# XXX: scikit-learn doesn't have weighted k-means.
# The paper weights each center by the number of points closest to it.
# https://stackoverflow.com/a/37198799/1889400 claims you can scale the
# features before clustering, but that doesn't seem right.
# I think that replicating the *points*, proportional to the number of
# original points closest to the candidate centers, would be a better way
# to do that.
if len(centers) < n_clusters:
logger.warning("Found fewer than %d clusters in init.", n_clusters)
# supplement with random
need = n_clusters - len(centers)
locs = sorted(
random_state.choice(
np.arange(0, len(X)), size=need, replace=False, chunks=len(X)
)
)
extra = X[locs].compute()
return np.vstack([centers, extra])
else:
# Step 7, 8 without weights
# dask RandomState objects aren't valid for scikit-learn
rng2 = (
random_state.randint(0, np.iinfo("i4").max - 1, chunks=())
.compute(scheduler="single-threaded")
.item()
)
km = sk_k_means.KMeans(n_clusters, random_state=rng2)
km.fit(centers)
return km.cluster_centers_ | [
"def",
"init_scalable",
"(",
"X",
",",
"n_clusters",
",",
"random_state",
"=",
"None",
",",
"max_iter",
"=",
"None",
",",
"oversampling_factor",
"=",
"2",
")",
":",
"logger",
".",
"info",
"(",
"\"Initializing with k-means||\"",
")",
"# Step 1: Initialize Centers",... | K-Means initialization using k-means||
This is algorithm 2 in Scalable K-Means++ (2012). | [
"K",
"-",
"Means",
"initialization",
"using",
"k",
"-",
"means||"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L397-L467 | train | 216,140 |
dask/dask-ml | dask_ml/cluster/k_means.py | _sample_points | def _sample_points(X, centers, oversampling_factor, random_state):
r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
"""
# re-implement evaluate_cost here, to avoid redundant computation
distances = pairwise_distances(X, centers).min(1) ** 2
denom = distances.sum()
p = oversampling_factor * distances / denom
draws = random_state.uniform(size=len(p), chunks=p.chunks)
picked = p > draws
new_idxs, = da.where(picked)
return new_idxs | python | def _sample_points(X, centers, oversampling_factor, random_state):
r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
"""
# re-implement evaluate_cost here, to avoid redundant computation
distances = pairwise_distances(X, centers).min(1) ** 2
denom = distances.sum()
p = oversampling_factor * distances / denom
draws = random_state.uniform(size=len(p), chunks=p.chunks)
picked = p > draws
new_idxs, = da.where(picked)
return new_idxs | [
"def",
"_sample_points",
"(",
"X",
",",
"centers",
",",
"oversampling_factor",
",",
"random_state",
")",
":",
"# re-implement evaluate_cost here, to avoid redundant computation",
"distances",
"=",
"pairwise_distances",
"(",
"X",
",",
"centers",
")",
".",
"min",
"(",
"... | r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})} | [
"r",
"Sample",
"points",
"independently",
"with",
"probability"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L476-L495 | train | 216,141 |
dask/dask-ml | dask_ml/preprocessing/data.py | RobustScaler.transform | def transform(self, X):
"""Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
This implementation was copied and modified from Scikit-Learn.
See License information here:
https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
"""
if self.with_centering:
check_is_fitted(self, "center_")
if self.with_scaling:
check_is_fitted(self, "scale_")
X = self._check_array(X, self.copy)
# if sparse.issparse(X):
# if self.with_scaling:
# inplace_column_scale(X, 1.0 / self.scale_)
# else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X | python | def transform(self, X):
"""Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
This implementation was copied and modified from Scikit-Learn.
See License information here:
https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
"""
if self.with_centering:
check_is_fitted(self, "center_")
if self.with_scaling:
check_is_fitted(self, "scale_")
X = self._check_array(X, self.copy)
# if sparse.issparse(X):
# if self.with_scaling:
# inplace_column_scale(X, 1.0 / self.scale_)
# else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"if",
"self",
".",
"with_centering",
":",
"check_is_fitted",
"(",
"self",
",",
"\"center_\"",
")",
"if",
"self",
".",
"with_scaling",
":",
"check_is_fitted",
"(",
"self",
",",
"\"scale_\"",
")",
"X",
"... | Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
This implementation was copied and modified from Scikit-Learn.
See License information here:
https://github.com/scikit-learn/scikit-learn/blob/master/README.rst | [
"Center",
"and",
"scale",
"the",
"data",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L168-L198 | train | 216,142 |
dask/dask-ml | dask_ml/preprocessing/data.py | RobustScaler.inverse_transform | def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
This implementation was copied and modified from Scikit-Learn.
See License information here:
https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
"""
check_is_fitted(self, "center_", "scale_")
# if sparse.issparse(X):
# if self.with_scaling:
# inplace_column_scale(X, self.scale_)
# else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X | python | def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
This implementation was copied and modified from Scikit-Learn.
See License information here:
https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
"""
check_is_fitted(self, "center_", "scale_")
# if sparse.issparse(X):
# if self.with_scaling:
# inplace_column_scale(X, self.scale_)
# else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X | [
"def",
"inverse_transform",
"(",
"self",
",",
"X",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"center_\"",
",",
"\"scale_\"",
")",
"# if sparse.issparse(X):",
"# if self.with_scaling:",
"# inplace_column_scale(X, self.scale_)",
"# else:",
"if",
"self",
"... | Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
This implementation was copied and modified from Scikit-Learn.
See License information here:
https://github.com/scikit-learn/scikit-learn/blob/master/README.rst | [
"Scale",
"back",
"the",
"data",
"to",
"the",
"original",
"representation"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L200-L223 | train | 216,143 |
dask/dask-ml | dask_ml/preprocessing/data.py | Categorizer.fit | def fit(self, X, y=None):
"""Find the categorical columns.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
self
"""
X = self._check_array(X)
if self.categories is not None:
# some basic validation
columns = pd.Index(self.categories)
categories = self.categories
elif isinstance(X, pd.DataFrame):
columns, categories = self._fit(X)
else:
columns, categories = self._fit_dask(X)
self.columns_ = columns
self.categories_ = categories
return self | python | def fit(self, X, y=None):
"""Find the categorical columns.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
self
"""
X = self._check_array(X)
if self.categories is not None:
# some basic validation
columns = pd.Index(self.categories)
categories = self.categories
elif isinstance(X, pd.DataFrame):
columns, categories = self._fit(X)
else:
columns, categories = self._fit_dask(X)
self.columns_ = columns
self.categories_ = categories
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"if",
"self",
".",
"categories",
"is",
"not",
"None",
":",
"# some basic validation",
"columns",
"=",
"pd",
".",
"Index",
"(... | Find the categorical columns.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
self | [
"Find",
"the",
"categorical",
"columns",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L401-L427 | train | 216,144 |
dask/dask-ml | dask_ml/preprocessing/data.py | Categorizer.transform | def transform(self, X, y=None):
"""Transform the columns in ``X`` according to ``self.categories_``.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
X_trn : pandas.DataFrame or dask.DataFrame
Same type as the input. The columns in ``self.categories_`` will
be converted to categorical dtype.
"""
check_is_fitted(self, "categories_")
X = self._check_array(X).copy()
categories = self.categories_
for k, dtype in categories.items():
if _HAS_CTD:
if not isinstance(dtype, pd.api.types.CategoricalDtype):
dtype = pd.api.types.CategoricalDtype(*dtype)
X[k] = X[k].astype(dtype)
else:
cat, ordered = dtype
X[k] = X[k].astype("category").cat.set_categories(cat, ordered)
return X | python | def transform(self, X, y=None):
"""Transform the columns in ``X`` according to ``self.categories_``.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
X_trn : pandas.DataFrame or dask.DataFrame
Same type as the input. The columns in ``self.categories_`` will
be converted to categorical dtype.
"""
check_is_fitted(self, "categories_")
X = self._check_array(X).copy()
categories = self.categories_
for k, dtype in categories.items():
if _HAS_CTD:
if not isinstance(dtype, pd.api.types.CategoricalDtype):
dtype = pd.api.types.CategoricalDtype(*dtype)
X[k] = X[k].astype(dtype)
else:
cat, ordered = dtype
X[k] = X[k].astype("category").cat.set_categories(cat, ordered)
return X | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"categories_\"",
")",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
".",
"copy",
"(",
")",
"categories",
"=",
"self",
".",
"c... | Transform the columns in ``X`` according to ``self.categories_``.
Parameters
----------
X : pandas.DataFrame or dask.DataFrame
y : ignored
Returns
-------
X_trn : pandas.DataFrame or dask.DataFrame
Same type as the input. The columns in ``self.categories_`` will
be converted to categorical dtype. | [
"Transform",
"the",
"columns",
"in",
"X",
"according",
"to",
"self",
".",
"categories_",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L455-L482 | train | 216,145 |
dask/dask-ml | dask_ml/preprocessing/data.py | DummyEncoder.fit | def fit(self, X, y=None):
"""Determine the categorical columns to be dummy encoded.
Parameters
----------
X : pandas.DataFrame or dask.dataframe.DataFrame
y : ignored
Returns
-------
self
"""
self.columns_ = X.columns
columns = self.columns
if columns is None:
columns = X.select_dtypes(include=["category"]).columns
else:
for column in columns:
assert is_categorical_dtype(X[column]), "Must be categorical"
self.categorical_columns_ = columns
self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_)
if _HAS_CTD:
self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_}
else:
self.dtypes_ = {
col: (X[col].cat.categories, X[col].cat.ordered)
for col in self.categorical_columns_
}
left = len(self.non_categorical_columns_)
self.categorical_blocks_ = {}
for col in self.categorical_columns_:
right = left + len(X[col].cat.categories)
if self.drop_first:
right -= 1
self.categorical_blocks_[col], left = slice(left, right), right
if isinstance(X, pd.DataFrame):
sample = X.iloc[:1]
else:
sample = X._meta_nonempty
self.transformed_columns_ = pd.get_dummies(
sample, drop_first=self.drop_first
).columns
return self | python | def fit(self, X, y=None):
"""Determine the categorical columns to be dummy encoded.
Parameters
----------
X : pandas.DataFrame or dask.dataframe.DataFrame
y : ignored
Returns
-------
self
"""
self.columns_ = X.columns
columns = self.columns
if columns is None:
columns = X.select_dtypes(include=["category"]).columns
else:
for column in columns:
assert is_categorical_dtype(X[column]), "Must be categorical"
self.categorical_columns_ = columns
self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_)
if _HAS_CTD:
self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_}
else:
self.dtypes_ = {
col: (X[col].cat.categories, X[col].cat.ordered)
for col in self.categorical_columns_
}
left = len(self.non_categorical_columns_)
self.categorical_blocks_ = {}
for col in self.categorical_columns_:
right = left + len(X[col].cat.categories)
if self.drop_first:
right -= 1
self.categorical_blocks_[col], left = slice(left, right), right
if isinstance(X, pd.DataFrame):
sample = X.iloc[:1]
else:
sample = X._meta_nonempty
self.transformed_columns_ = pd.get_dummies(
sample, drop_first=self.drop_first
).columns
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"self",
".",
"columns_",
"=",
"X",
".",
"columns",
"columns",
"=",
"self",
".",
"columns",
"if",
"columns",
"is",
"None",
":",
"columns",
"=",
"X",
".",
"select_dtypes",
"(",
"... | Determine the categorical columns to be dummy encoded.
Parameters
----------
X : pandas.DataFrame or dask.dataframe.DataFrame
y : ignored
Returns
-------
self | [
"Determine",
"the",
"categorical",
"columns",
"to",
"be",
"dummy",
"encoded",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L570-L617 | train | 216,146 |
dask/dask-ml | dask_ml/preprocessing/data.py | DummyEncoder.transform | def transform(self, X, y=None):
"""Dummy encode the categorical columns in X
Parameters
----------
X : pd.DataFrame or dd.DataFrame
y : ignored
Returns
-------
transformed : pd.DataFrame or dd.DataFrame
Same type as the input
"""
if not X.columns.equals(self.columns_):
raise ValueError(
"Columns of 'X' do not match the training "
"columns. Got {!r}, expected {!r}".format(X.columns, self.columns)
)
if isinstance(X, pd.DataFrame):
return pd.get_dummies(X, drop_first=self.drop_first, columns=self.columns)
elif isinstance(X, dd.DataFrame):
return dd.get_dummies(X, drop_first=self.drop_first, columns=self.columns)
else:
raise TypeError("Unexpected type {}".format(type(X))) | python | def transform(self, X, y=None):
"""Dummy encode the categorical columns in X
Parameters
----------
X : pd.DataFrame or dd.DataFrame
y : ignored
Returns
-------
transformed : pd.DataFrame or dd.DataFrame
Same type as the input
"""
if not X.columns.equals(self.columns_):
raise ValueError(
"Columns of 'X' do not match the training "
"columns. Got {!r}, expected {!r}".format(X.columns, self.columns)
)
if isinstance(X, pd.DataFrame):
return pd.get_dummies(X, drop_first=self.drop_first, columns=self.columns)
elif isinstance(X, dd.DataFrame):
return dd.get_dummies(X, drop_first=self.drop_first, columns=self.columns)
else:
raise TypeError("Unexpected type {}".format(type(X))) | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"not",
"X",
".",
"columns",
".",
"equals",
"(",
"self",
".",
"columns_",
")",
":",
"raise",
"ValueError",
"(",
"\"Columns of 'X' do not match the training \"",
"\"columns. Got ... | Dummy encode the categorical columns in X
Parameters
----------
X : pd.DataFrame or dd.DataFrame
y : ignored
Returns
-------
transformed : pd.DataFrame or dd.DataFrame
Same type as the input | [
"Dummy",
"encode",
"the",
"categorical",
"columns",
"in",
"X"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L619-L642 | train | 216,147 |
dask/dask-ml | dask_ml/preprocessing/data.py | DummyEncoder.inverse_transform | def inverse_transform(self, X):
"""Inverse dummy-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
"""
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.transformed_columns_)
elif isinstance(X, da.Array):
# later on we concat(..., axis=1), which requires
# known divisions. Suboptimal, but I think unavoidable.
unknown = np.isnan(X.chunks[0]).any()
if unknown:
lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute()
X = X.copy()
chunks = (tuple(lengths), X.chunks[1])
X._chunks = chunks
X = dd.from_dask_array(X, columns=self.transformed_columns_)
big = isinstance(X, dd.DataFrame)
if big:
chunks = np.array(X.divisions)
chunks[-1] = chunks[-1] + 1
chunks = tuple(chunks[1:] - chunks[:-1])
non_cat = X[list(self.non_categorical_columns_)]
cats = []
for col in self.categorical_columns_:
slice_ = self.categorical_blocks_[col]
if _HAS_CTD:
dtype = self.dtypes_[col]
categories, ordered = dtype.categories, dtype.ordered
else:
categories, ordered = self.dtypes_[col]
# use .values to avoid warning from pandas
cols_slice = list(X.columns[slice_])
if big:
inds = X[cols_slice].to_dask_array(lengths=chunks)
else:
inds = X[cols_slice].values
codes = inds.argmax(1)
if self.drop_first:
codes += 1
codes[(inds == 0).all(1)] = 0
if big:
# dask
codes._chunks = (chunks,)
# Need a Categorical.from_codes for dask
series = (
dd.from_dask_array(codes, columns=col)
.astype("category")
.cat.set_categories(np.arange(len(categories)), ordered=ordered)
.cat.rename_categories(categories)
)
# Bug in pandas <= 0.20.3 lost name
if series.name is None:
series.name = col
series.divisions = X.divisions
else:
# pandas
series = pd.Series(
pd.Categorical.from_codes(codes, categories, ordered=ordered),
name=col,
)
cats.append(series)
if big:
df = dd.concat([non_cat] + cats, axis=1)[list(self.columns_)]
else:
df = pd.concat([non_cat] + cats, axis=1)[self.columns_]
return df | python | def inverse_transform(self, X):
"""Inverse dummy-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
"""
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.transformed_columns_)
elif isinstance(X, da.Array):
# later on we concat(..., axis=1), which requires
# known divisions. Suboptimal, but I think unavoidable.
unknown = np.isnan(X.chunks[0]).any()
if unknown:
lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute()
X = X.copy()
chunks = (tuple(lengths), X.chunks[1])
X._chunks = chunks
X = dd.from_dask_array(X, columns=self.transformed_columns_)
big = isinstance(X, dd.DataFrame)
if big:
chunks = np.array(X.divisions)
chunks[-1] = chunks[-1] + 1
chunks = tuple(chunks[1:] - chunks[:-1])
non_cat = X[list(self.non_categorical_columns_)]
cats = []
for col in self.categorical_columns_:
slice_ = self.categorical_blocks_[col]
if _HAS_CTD:
dtype = self.dtypes_[col]
categories, ordered = dtype.categories, dtype.ordered
else:
categories, ordered = self.dtypes_[col]
# use .values to avoid warning from pandas
cols_slice = list(X.columns[slice_])
if big:
inds = X[cols_slice].to_dask_array(lengths=chunks)
else:
inds = X[cols_slice].values
codes = inds.argmax(1)
if self.drop_first:
codes += 1
codes[(inds == 0).all(1)] = 0
if big:
# dask
codes._chunks = (chunks,)
# Need a Categorical.from_codes for dask
series = (
dd.from_dask_array(codes, columns=col)
.astype("category")
.cat.set_categories(np.arange(len(categories)), ordered=ordered)
.cat.rename_categories(categories)
)
# Bug in pandas <= 0.20.3 lost name
if series.name is None:
series.name = col
series.divisions = X.divisions
else:
# pandas
series = pd.Series(
pd.Categorical.from_codes(codes, categories, ordered=ordered),
name=col,
)
cats.append(series)
if big:
df = dd.concat([non_cat] + cats, axis=1)[list(self.columns_)]
else:
df = pd.concat([non_cat] + cats, axis=1)[self.columns_]
return df | [
"def",
"inverse_transform",
"(",
"self",
",",
"X",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"X",
"=",
"pd",
".",
"DataFrame",
"(",
"X",
",",
"columns",
"=",
"self",
".",
"transformed_columns_",
")",
"elif",
"isinst... | Inverse dummy-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame | [
"Inverse",
"dummy",
"-",
"encode",
"the",
"columns",
"in",
"X"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L644-L729 | train | 216,148 |
dask/dask-ml | dask_ml/preprocessing/data.py | OrdinalEncoder.fit | def fit(self, X, y=None):
"""Determine the categorical columns to be encoded.
Parameters
----------
X : pandas.DataFrame or dask.dataframe.DataFrame
y : ignored
Returns
-------
self
"""
self.columns_ = X.columns
columns = self.columns
if columns is None:
columns = X.select_dtypes(include=["category"]).columns
else:
for column in columns:
assert is_categorical_dtype(X[column]), "Must be categorical"
self.categorical_columns_ = columns
self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_)
if _HAS_CTD:
self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_}
else:
self.dtypes_ = {
col: (X[col].cat.categories, X[col].cat.ordered)
for col in self.categorical_columns_
}
return self | python | def fit(self, X, y=None):
"""Determine the categorical columns to be encoded.
Parameters
----------
X : pandas.DataFrame or dask.dataframe.DataFrame
y : ignored
Returns
-------
self
"""
self.columns_ = X.columns
columns = self.columns
if columns is None:
columns = X.select_dtypes(include=["category"]).columns
else:
for column in columns:
assert is_categorical_dtype(X[column]), "Must be categorical"
self.categorical_columns_ = columns
self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_)
if _HAS_CTD:
self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_}
else:
self.dtypes_ = {
col: (X[col].cat.categories, X[col].cat.ordered)
for col in self.categorical_columns_
}
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"self",
".",
"columns_",
"=",
"X",
".",
"columns",
"columns",
"=",
"self",
".",
"columns",
"if",
"columns",
"is",
"None",
":",
"columns",
"=",
"X",
".",
"select_dtypes",
"(",
"... | Determine the categorical columns to be encoded.
Parameters
----------
X : pandas.DataFrame or dask.dataframe.DataFrame
y : ignored
Returns
-------
self | [
"Determine",
"the",
"categorical",
"columns",
"to",
"be",
"encoded",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L804-L835 | train | 216,149 |
dask/dask-ml | dask_ml/preprocessing/data.py | OrdinalEncoder.transform | def transform(self, X, y=None):
"""Ordinal encode the categorical columns in X
Parameters
----------
X : pd.DataFrame or dd.DataFrame
y : ignored
Returns
-------
transformed : pd.DataFrame or dd.DataFrame
Same type as the input
"""
if not X.columns.equals(self.columns_):
raise ValueError(
"Columns of 'X' do not match the training "
"columns. Got {!r}, expected {!r}".format(X.columns, self.columns)
)
if not isinstance(X, (pd.DataFrame, dd.DataFrame)):
raise TypeError("Unexpected type {}".format(type(X)))
X = X.copy()
for col in self.categorical_columns_:
X[col] = X[col].cat.codes
return X | python | def transform(self, X, y=None):
"""Ordinal encode the categorical columns in X
Parameters
----------
X : pd.DataFrame or dd.DataFrame
y : ignored
Returns
-------
transformed : pd.DataFrame or dd.DataFrame
Same type as the input
"""
if not X.columns.equals(self.columns_):
raise ValueError(
"Columns of 'X' do not match the training "
"columns. Got {!r}, expected {!r}".format(X.columns, self.columns)
)
if not isinstance(X, (pd.DataFrame, dd.DataFrame)):
raise TypeError("Unexpected type {}".format(type(X)))
X = X.copy()
for col in self.categorical_columns_:
X[col] = X[col].cat.codes
return X | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"not",
"X",
".",
"columns",
".",
"equals",
"(",
"self",
".",
"columns_",
")",
":",
"raise",
"ValueError",
"(",
"\"Columns of 'X' do not match the training \"",
"\"columns. Got ... | Ordinal encode the categorical columns in X
Parameters
----------
X : pd.DataFrame or dd.DataFrame
y : ignored
Returns
-------
transformed : pd.DataFrame or dd.DataFrame
Same type as the input | [
"Ordinal",
"encode",
"the",
"categorical",
"columns",
"in",
"X"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L837-L861 | train | 216,150 |
dask/dask-ml | dask_ml/preprocessing/data.py | OrdinalEncoder.inverse_transform | def inverse_transform(self, X):
"""Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
"""
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.columns_)
elif isinstance(X, da.Array):
# later on we concat(..., axis=1), which requires
# known divisions. Suboptimal, but I think unavoidable.
unknown = np.isnan(X.chunks[0]).any()
if unknown:
lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute()
X = X.copy()
chunks = (tuple(lengths), X.chunks[1])
X._chunks = chunks
X = dd.from_dask_array(X, columns=self.columns_)
big = isinstance(X, dd.DataFrame)
if big:
chunks = np.array(X.divisions)
chunks[-1] = chunks[-1] + 1
chunks = tuple(chunks[1:] - chunks[:-1])
X = X.copy()
for col in self.categorical_columns_:
if _HAS_CTD:
dtype = self.dtypes_[col]
categories, ordered = dtype.categories, dtype.ordered
else:
categories, ordered = self.dtypes_[col]
# use .values to avoid warning from pandas
codes = X[col].values
if big:
# dask
codes._chunks = (chunks,)
# Need a Categorical.from_codes for dask
series = (
dd.from_dask_array(codes, columns=col)
.astype("category")
.cat.set_categories(np.arange(len(categories)), ordered=ordered)
.cat.rename_categories(categories)
)
# Bug in pandas <= 0.20.3 lost name
if series.name is None:
series.name = col
series.divisions = X.divisions
else:
# pandas
series = pd.Series(
pd.Categorical.from_codes(codes, categories, ordered=ordered),
name=col,
)
X[col] = series
return X | python | def inverse_transform(self, X):
"""Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
"""
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.columns_)
elif isinstance(X, da.Array):
# later on we concat(..., axis=1), which requires
# known divisions. Suboptimal, but I think unavoidable.
unknown = np.isnan(X.chunks[0]).any()
if unknown:
lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute()
X = X.copy()
chunks = (tuple(lengths), X.chunks[1])
X._chunks = chunks
X = dd.from_dask_array(X, columns=self.columns_)
big = isinstance(X, dd.DataFrame)
if big:
chunks = np.array(X.divisions)
chunks[-1] = chunks[-1] + 1
chunks = tuple(chunks[1:] - chunks[:-1])
X = X.copy()
for col in self.categorical_columns_:
if _HAS_CTD:
dtype = self.dtypes_[col]
categories, ordered = dtype.categories, dtype.ordered
else:
categories, ordered = self.dtypes_[col]
# use .values to avoid warning from pandas
codes = X[col].values
if big:
# dask
codes._chunks = (chunks,)
# Need a Categorical.from_codes for dask
series = (
dd.from_dask_array(codes, columns=col)
.astype("category")
.cat.set_categories(np.arange(len(categories)), ordered=ordered)
.cat.rename_categories(categories)
)
# Bug in pandas <= 0.20.3 lost name
if series.name is None:
series.name = col
series.divisions = X.divisions
else:
# pandas
series = pd.Series(
pd.Categorical.from_codes(codes, categories, ordered=ordered),
name=col,
)
X[col] = series
return X | [
"def",
"inverse_transform",
"(",
"self",
",",
"X",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
":",
"X",
"=",
"pd",
".",
"DataFrame",
"(",
"X",
",",
"columns",
"=",
"self",
".",
"columns_",
")",
"elif",
"isinstance",
"("... | Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame | [
"Inverse",
"ordinal",
"-",
"encode",
"the",
"columns",
"in",
"X"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L863-L933 | train | 216,151 |
dask/dask-ml | dask_ml/_partial.py | fit | def fit(model, x, y, compute=True, shuffle_blocks=True, random_state=None, **kwargs):
""" Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
Ideally your rows are independent and identically distributed. By default,
this function will step through chunks of the arrays in random order.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
compute : bool
Whether to compute this result
shuffle_blocks : bool
Whether to shuffle the blocks with ``random_state`` or not
random_state : int or numpy.random.RandomState
Random state to use when shuffling blocks
kwargs:
options to pass to partial_fit
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.randint(0, 2, 10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
"""
if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"):
x = x.to_dask_array()
assert x.ndim == 2
if y is not None:
if not hasattr(y, "chunks") and hasattr(y, "to_dask_array"):
y = y.to_dask_array()
assert y.ndim == 1
assert x.chunks[0] == y.chunks[0]
assert hasattr(model, "partial_fit")
if len(x.chunks[1]) > 1:
x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1])))
nblocks = len(x.chunks[0])
order = list(range(nblocks))
if shuffle_blocks:
rng = sklearn.utils.check_random_state(random_state)
rng.shuffle(order)
name = "fit-" + dask.base.tokenize(model, x, y, kwargs, order)
dsk = {(name, -1): model}
dsk.update(
{
(name, i): (
_partial_fit,
(name, i - 1),
(x.name, order[i], 0),
(getattr(y, "name", ""), order[i]),
kwargs,
)
for i in range(nblocks)
}
)
graphs = {x.name: x.__dask_graph__(), name: dsk}
if hasattr(y, "__dask_graph__"):
graphs[y.name] = y.__dask_graph__()
try:
from dask.highlevelgraph import HighLevelGraph
new_dsk = HighLevelGraph.merge(*graphs.values())
except ImportError:
from dask import sharedict
new_dsk = sharedict.merge(*graphs.values())
value = Delayed((name, nblocks - 1), new_dsk)
if compute:
return value.compute()
else:
return value | python | def fit(model, x, y, compute=True, shuffle_blocks=True, random_state=None, **kwargs):
""" Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
Ideally your rows are independent and identically distributed. By default,
this function will step through chunks of the arrays in random order.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
compute : bool
Whether to compute this result
shuffle_blocks : bool
Whether to shuffle the blocks with ``random_state`` or not
random_state : int or numpy.random.RandomState
Random state to use when shuffling blocks
kwargs:
options to pass to partial_fit
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.randint(0, 2, 10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
"""
if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"):
x = x.to_dask_array()
assert x.ndim == 2
if y is not None:
if not hasattr(y, "chunks") and hasattr(y, "to_dask_array"):
y = y.to_dask_array()
assert y.ndim == 1
assert x.chunks[0] == y.chunks[0]
assert hasattr(model, "partial_fit")
if len(x.chunks[1]) > 1:
x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1])))
nblocks = len(x.chunks[0])
order = list(range(nblocks))
if shuffle_blocks:
rng = sklearn.utils.check_random_state(random_state)
rng.shuffle(order)
name = "fit-" + dask.base.tokenize(model, x, y, kwargs, order)
dsk = {(name, -1): model}
dsk.update(
{
(name, i): (
_partial_fit,
(name, i - 1),
(x.name, order[i], 0),
(getattr(y, "name", ""), order[i]),
kwargs,
)
for i in range(nblocks)
}
)
graphs = {x.name: x.__dask_graph__(), name: dsk}
if hasattr(y, "__dask_graph__"):
graphs[y.name] = y.__dask_graph__()
try:
from dask.highlevelgraph import HighLevelGraph
new_dsk = HighLevelGraph.merge(*graphs.values())
except ImportError:
from dask import sharedict
new_dsk = sharedict.merge(*graphs.values())
value = Delayed((name, nblocks - 1), new_dsk)
if compute:
return value.compute()
else:
return value | [
"def",
"fit",
"(",
"model",
",",
"x",
",",
"y",
",",
"compute",
"=",
"True",
",",
"shuffle_blocks",
"=",
"True",
",",
"random_state",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"hasattr",
"(",
"x",
",",
"\"chunks\"",
")",
"and",
... | Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
Ideally your rows are independent and identically distributed. By default,
this function will step through chunks of the arrays in random order.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
compute : bool
Whether to compute this result
shuffle_blocks : bool
Whether to shuffle the blocks with ``random_state`` or not
random_state : int or numpy.random.RandomState
Random state to use when shuffling blocks
kwargs:
options to pass to partial_fit
Examples
--------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.randint(0, 2, 10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64> | [
"Fit",
"scikit",
"learn",
"model",
"against",
"dask",
"arrays"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/_partial.py#L109-L217 | train | 216,152 |
dask/dask-ml | dask_ml/_partial.py | predict | def predict(model, x):
""" Predict with a scikit learn model
Parameters
----------
model : scikit learn classifier
x : dask Array
See docstring for ``da.learn.fit``
"""
if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"):
x = x.to_dask_array()
assert x.ndim == 2
if len(x.chunks[1]) > 1:
x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1])))
func = partial(_predict, model)
xx = np.zeros((1, x.shape[1]), dtype=x.dtype)
dt = model.predict(xx).dtype
return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze() | python | def predict(model, x):
""" Predict with a scikit learn model
Parameters
----------
model : scikit learn classifier
x : dask Array
See docstring for ``da.learn.fit``
"""
if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"):
x = x.to_dask_array()
assert x.ndim == 2
if len(x.chunks[1]) > 1:
x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1])))
func = partial(_predict, model)
xx = np.zeros((1, x.shape[1]), dtype=x.dtype)
dt = model.predict(xx).dtype
return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze() | [
"def",
"predict",
"(",
"model",
",",
"x",
")",
":",
"if",
"not",
"hasattr",
"(",
"x",
",",
"\"chunks\"",
")",
"and",
"hasattr",
"(",
"x",
",",
"\"to_dask_array\"",
")",
":",
"x",
"=",
"x",
".",
"to_dask_array",
"(",
")",
"assert",
"x",
".",
"ndim",... | Predict with a scikit learn model
Parameters
----------
model : scikit learn classifier
x : dask Array
See docstring for ``da.learn.fit`` | [
"Predict",
"with",
"a",
"scikit",
"learn",
"model"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/_partial.py#L224-L242 | train | 216,153 |
dask/dask-ml | dask_ml/cluster/spectral.py | _slice_mostly_sorted | def _slice_mostly_sorted(array, keep, rest, ind=None):
"""Slice dask array `array` that is almost entirely sorted already.
We perform approximately `2 * len(keep)` slices on `array`.
This is OK, since `keep` is small. Individually, each of these slices
is entirely sorted.
Parameters
----------
array : dask.array.Array
keep : ndarray[Int]
This must be sorted.
rest : ndarray[Bool]
ind : ndarray[Int], optional
Returns
-------
sliced : dask.array.Array
"""
if ind is None:
ind = np.arange(len(array))
idx = np.argsort(np.concatenate([keep, ind[rest]]))
slices = []
if keep[0] > 0: # avoid creating empty slices
slices.append(slice(None, keep[0]))
slices.append([keep[0]])
windows = zip(keep[:-1], keep[1:])
for l, r in windows:
if r > l + 1: # avoid creating empty slices
slices.append(slice(l + 1, r))
slices.append([r])
if keep[-1] < len(array) - 1: # avoid creating empty slices
slices.append(slice(keep[-1] + 1, None))
result = da.concatenate([array[idx[slice_]] for slice_ in slices])
return result | python | def _slice_mostly_sorted(array, keep, rest, ind=None):
"""Slice dask array `array` that is almost entirely sorted already.
We perform approximately `2 * len(keep)` slices on `array`.
This is OK, since `keep` is small. Individually, each of these slices
is entirely sorted.
Parameters
----------
array : dask.array.Array
keep : ndarray[Int]
This must be sorted.
rest : ndarray[Bool]
ind : ndarray[Int], optional
Returns
-------
sliced : dask.array.Array
"""
if ind is None:
ind = np.arange(len(array))
idx = np.argsort(np.concatenate([keep, ind[rest]]))
slices = []
if keep[0] > 0: # avoid creating empty slices
slices.append(slice(None, keep[0]))
slices.append([keep[0]])
windows = zip(keep[:-1], keep[1:])
for l, r in windows:
if r > l + 1: # avoid creating empty slices
slices.append(slice(l + 1, r))
slices.append([r])
if keep[-1] < len(array) - 1: # avoid creating empty slices
slices.append(slice(keep[-1] + 1, None))
result = da.concatenate([array[idx[slice_]] for slice_ in slices])
return result | [
"def",
"_slice_mostly_sorted",
"(",
"array",
",",
"keep",
",",
"rest",
",",
"ind",
"=",
"None",
")",
":",
"if",
"ind",
"is",
"None",
":",
"ind",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"array",
")",
")",
"idx",
"=",
"np",
".",
"argsort",
"(",
... | Slice dask array `array` that is almost entirely sorted already.
We perform approximately `2 * len(keep)` slices on `array`.
This is OK, since `keep` is small. Individually, each of these slices
is entirely sorted.
Parameters
----------
array : dask.array.Array
keep : ndarray[Int]
This must be sorted.
rest : ndarray[Bool]
ind : ndarray[Int], optional
Returns
-------
sliced : dask.array.Array | [
"Slice",
"dask",
"array",
"array",
"that",
"is",
"almost",
"entirely",
"sorted",
"already",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/spectral.py#L339-L376 | train | 216,154 |
dask/dask-ml | dask_ml/datasets.py | make_counts | def make_counts(
n_samples=1000,
n_features=100,
n_informative=2,
scale=1.0,
chunks=100,
random_state=None,
):
"""
Generate a dummy dataset for modeling count data.
Parameters
----------
n_samples : int
number of rows in the output array
n_features : int
number of columns (features) in the output array
n_informative : int
number of features that are correlated with the outcome
scale : float
Scale the true coefficient array by this
chunks : int
Number of rows per dask array block.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : dask.array, size ``(n_samples, n_features)``
y : dask.array, size ``(n_samples,)``
array of non-negative integer-valued data
Examples
--------
>>> X, y = make_counts()
"""
rng = dask_ml.utils.check_random_state(random_state)
X = rng.normal(0, 1, size=(n_samples, n_features), chunks=(chunks, n_features))
informative_idx = rng.choice(n_features, n_informative, chunks=n_informative)
beta = (rng.random(n_features, chunks=n_features) - 1) * scale
informative_idx, beta = dask.compute(informative_idx, beta)
z0 = X[:, informative_idx].dot(beta[informative_idx])
rate = da.exp(z0)
y = rng.poisson(rate, size=1, chunks=(chunks,))
return X, y | python | def make_counts(
n_samples=1000,
n_features=100,
n_informative=2,
scale=1.0,
chunks=100,
random_state=None,
):
"""
Generate a dummy dataset for modeling count data.
Parameters
----------
n_samples : int
number of rows in the output array
n_features : int
number of columns (features) in the output array
n_informative : int
number of features that are correlated with the outcome
scale : float
Scale the true coefficient array by this
chunks : int
Number of rows per dask array block.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : dask.array, size ``(n_samples, n_features)``
y : dask.array, size ``(n_samples,)``
array of non-negative integer-valued data
Examples
--------
>>> X, y = make_counts()
"""
rng = dask_ml.utils.check_random_state(random_state)
X = rng.normal(0, 1, size=(n_samples, n_features), chunks=(chunks, n_features))
informative_idx = rng.choice(n_features, n_informative, chunks=n_informative)
beta = (rng.random(n_features, chunks=n_features) - 1) * scale
informative_idx, beta = dask.compute(informative_idx, beta)
z0 = X[:, informative_idx].dot(beta[informative_idx])
rate = da.exp(z0)
y = rng.poisson(rate, size=1, chunks=(chunks,))
return X, y | [
"def",
"make_counts",
"(",
"n_samples",
"=",
"1000",
",",
"n_features",
"=",
"100",
",",
"n_informative",
"=",
"2",
",",
"scale",
"=",
"1.0",
",",
"chunks",
"=",
"100",
",",
"random_state",
"=",
"None",
",",
")",
":",
"rng",
"=",
"dask_ml",
".",
"uti... | Generate a dummy dataset for modeling count data.
Parameters
----------
n_samples : int
number of rows in the output array
n_features : int
number of columns (features) in the output array
n_informative : int
number of features that are correlated with the outcome
scale : float
Scale the true coefficient array by this
chunks : int
Number of rows per dask array block.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : dask.array, size ``(n_samples, n_features)``
y : dask.array, size ``(n_samples,)``
array of non-negative integer-valued data
Examples
--------
>>> X, y = make_counts() | [
"Generate",
"a",
"dummy",
"dataset",
"for",
"modeling",
"count",
"data",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/datasets.py#L24-L73 | train | 216,155 |
dask/dask-ml | dask_ml/datasets.py | make_blobs | def make_blobs(
n_samples=100,
n_features=2,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
chunks=None,
):
"""
Generate isotropic Gaussian blobs for clustering.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int or array-like, optional (default=100)
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=None)
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from dask_ml.datasets import make_blobs
>>> X, y = make_blobs(n_samples=100000, chunks=10000)
>>> X
dask.array<..., shape=(100000, 2), dtype=float64, chunksize=(10000, 2)>
>>> y
dask.array<concatenate, shape=(100000,), dtype=int64, chunksize=(10000,)>
See Also
--------
make_classification: a more intricate variant
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
if centers is None:
# TODO: non-int n_samples?
centers = 3
if isinstance(centers, numbers.Integral):
# Make a prototype
n_centers = centers
X, y = sklearn.datasets.make_blobs(
n_samples=chunks[0][0],
n_features=n_features,
centers=centers,
shuffle=shuffle,
cluster_std=cluster_std,
center_box=center_box,
random_state=random_state,
)
centers = []
centers = np.zeros((n_centers, n_features))
for i in range(n_centers):
centers[i] = X[y == i].mean(0)
objs = [
dask.delayed(sklearn.datasets.make_blobs, nout=2)(
n_samples=n_samples_per_block,
n_features=n_features,
centers=centers,
cluster_std=cluster_std,
shuffle=shuffle,
center_box=center_box,
random_state=i,
)
for i, n_samples_per_block in enumerate(chunks[0])
]
Xobjs, yobjs = zip(*objs)
Xarrs = [
da.from_delayed(arr, shape=(n, n_features), dtype="f8")
for arr, n in zip(Xobjs, chunks[0])
]
X_big = da.vstack(Xarrs)
yarrs = [
da.from_delayed(arr, shape=(n,), dtype=np.dtype("int"))
for arr, n in zip(yobjs, chunks[0])
]
y_big = da.hstack(yarrs)
return X_big, y_big | python | def make_blobs(
n_samples=100,
n_features=2,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
chunks=None,
):
"""
Generate isotropic Gaussian blobs for clustering.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int or array-like, optional (default=100)
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=None)
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from dask_ml.datasets import make_blobs
>>> X, y = make_blobs(n_samples=100000, chunks=10000)
>>> X
dask.array<..., shape=(100000, 2), dtype=float64, chunksize=(10000, 2)>
>>> y
dask.array<concatenate, shape=(100000,), dtype=int64, chunksize=(10000,)>
See Also
--------
make_classification: a more intricate variant
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
if centers is None:
# TODO: non-int n_samples?
centers = 3
if isinstance(centers, numbers.Integral):
# Make a prototype
n_centers = centers
X, y = sklearn.datasets.make_blobs(
n_samples=chunks[0][0],
n_features=n_features,
centers=centers,
shuffle=shuffle,
cluster_std=cluster_std,
center_box=center_box,
random_state=random_state,
)
centers = []
centers = np.zeros((n_centers, n_features))
for i in range(n_centers):
centers[i] = X[y == i].mean(0)
objs = [
dask.delayed(sklearn.datasets.make_blobs, nout=2)(
n_samples=n_samples_per_block,
n_features=n_features,
centers=centers,
cluster_std=cluster_std,
shuffle=shuffle,
center_box=center_box,
random_state=i,
)
for i, n_samples_per_block in enumerate(chunks[0])
]
Xobjs, yobjs = zip(*objs)
Xarrs = [
da.from_delayed(arr, shape=(n, n_features), dtype="f8")
for arr, n in zip(Xobjs, chunks[0])
]
X_big = da.vstack(Xarrs)
yarrs = [
da.from_delayed(arr, shape=(n,), dtype=np.dtype("int"))
for arr, n in zip(yobjs, chunks[0])
]
y_big = da.hstack(yarrs)
return X_big, y_big | [
"def",
"make_blobs",
"(",
"n_samples",
"=",
"100",
",",
"n_features",
"=",
"2",
",",
"centers",
"=",
"None",
",",
"cluster_std",
"=",
"1.0",
",",
"center_box",
"=",
"(",
"-",
"10.0",
",",
"10.0",
")",
",",
"shuffle",
"=",
"True",
",",
"random_state",
... | Generate isotropic Gaussian blobs for clustering.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int or array-like, optional (default=100)
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=None)
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from dask_ml.datasets import make_blobs
>>> X, y = make_blobs(n_samples=100000, chunks=10000)
>>> X
dask.array<..., shape=(100000, 2), dtype=float64, chunksize=(10000, 2)>
>>> y
dask.array<concatenate, shape=(100000,), dtype=int64, chunksize=(10000,)>
See Also
--------
make_classification: a more intricate variant | [
"Generate",
"isotropic",
"Gaussian",
"blobs",
"for",
"clustering",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/datasets.py#L76-L203 | train | 216,156 |
dask/dask-ml | dask_ml/datasets.py | make_regression | def make_regression(
n_samples=100,
n_features=100,
n_informative=10,
n_targets=1,
bias=0.0,
effective_rank=None,
tail_strength=0.5,
noise=0.0,
shuffle=True,
coef=False,
random_state=None,
chunks=None,
):
"""
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
rng = sklearn.utils.check_random_state(random_state)
return_coef = coef is True
if chunks[1][0] != n_features:
raise ValueError(
"Can only generate arrays partitioned along the "
"first axis. Specifying a larger chunksize for "
"the second axis."
)
_, _, coef = sklearn.datasets.make_regression(
n_samples=chunks[0][0],
n_features=n_features,
n_informative=n_informative,
n_targets=n_targets,
bias=bias,
effective_rank=effective_rank,
tail_strength=tail_strength,
noise=noise,
shuffle=shuffle,
coef=True, # hardcode here
random_state=rng,
)
seed = da.random.random_state_data(1, random_state=rng)
da_rng = da.random.RandomState(seed[0])
X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features))
y_big = da.dot(X_big, coef) + bias
if noise > 0:
y_big = y_big + da_rng.normal(
scale=noise, size=y_big.shape, chunks=y_big.chunks
)
y_big = y_big.squeeze()
if return_coef:
return X_big, y_big, coef
else:
return X_big, y_big | python | def make_regression(
n_samples=100,
n_features=100,
n_informative=10,
n_targets=1,
bias=0.0,
effective_rank=None,
tail_strength=0.5,
noise=0.0,
shuffle=True,
coef=False,
random_state=None,
chunks=None,
):
"""
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
rng = sklearn.utils.check_random_state(random_state)
return_coef = coef is True
if chunks[1][0] != n_features:
raise ValueError(
"Can only generate arrays partitioned along the "
"first axis. Specifying a larger chunksize for "
"the second axis."
)
_, _, coef = sklearn.datasets.make_regression(
n_samples=chunks[0][0],
n_features=n_features,
n_informative=n_informative,
n_targets=n_targets,
bias=bias,
effective_rank=effective_rank,
tail_strength=tail_strength,
noise=noise,
shuffle=shuffle,
coef=True, # hardcode here
random_state=rng,
)
seed = da.random.random_state_data(1, random_state=rng)
da_rng = da.random.RandomState(seed[0])
X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features))
y_big = da.dot(X_big, coef) + bias
if noise > 0:
y_big = y_big + da_rng.normal(
scale=noise, size=y_big.shape, chunks=y_big.chunks
)
y_big = y_big.squeeze()
if return_coef:
return X_big, y_big, coef
else:
return X_big, y_big | [
"def",
"make_regression",
"(",
"n_samples",
"=",
"100",
",",
"n_features",
"=",
"100",
",",
"n_informative",
"=",
"10",
",",
"n_targets",
"=",
"1",
",",
"bias",
"=",
"0.0",
",",
"effective_rank",
"=",
"None",
",",
"tail_strength",
"=",
"0.5",
",",
"noise... | Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True. | [
"Generate",
"a",
"random",
"regression",
"problem",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/datasets.py#L206-L338 | train | 216,157 |
dask/dask-ml | dask_ml/decomposition/truncated_svd.py | TruncatedSVD.fit_transform | def fit_transform(self, X, y=None):
"""Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array, of the
same type as the input array. If ``X`` was a ``dask.array``, then
``X_new`` will be a ``dask.array`` with the same chunks along the
first dimension.
"""
X = self._check_array(X)
if self.algorithm not in {"tsqr", "randomized"}:
raise ValueError()
if self.algorithm == "tsqr":
u, s, v = da.linalg.svd(X)
u = u[:, : self.n_components]
s = s[: self.n_components]
v = v[: self.n_components]
else:
u, s, v = da.linalg.svd_compressed(
X, self.n_components, self.n_iter, seed=self.random_state
)
u, v = svd_flip(u, v)
X_transformed = u * s
explained_var = X_transformed.var(axis=0)
full_var = X.var(axis=0).sum()
explained_variance_ratio = explained_var / full_var
components, ev, evr, sv = compute(v, explained_var, explained_variance_ratio, s)
self.components_ = components
self.explained_variance_ = ev
self.explained_variance_ratio_ = evr
self.singular_values_ = sv
return X_transformed | python | def fit_transform(self, X, y=None):
"""Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array, of the
same type as the input array. If ``X`` was a ``dask.array``, then
``X_new`` will be a ``dask.array`` with the same chunks along the
first dimension.
"""
X = self._check_array(X)
if self.algorithm not in {"tsqr", "randomized"}:
raise ValueError()
if self.algorithm == "tsqr":
u, s, v = da.linalg.svd(X)
u = u[:, : self.n_components]
s = s[: self.n_components]
v = v[: self.n_components]
else:
u, s, v = da.linalg.svd_compressed(
X, self.n_components, self.n_iter, seed=self.random_state
)
u, v = svd_flip(u, v)
X_transformed = u * s
explained_var = X_transformed.var(axis=0)
full_var = X.var(axis=0).sum()
explained_variance_ratio = explained_var / full_var
components, ev, evr, sv = compute(v, explained_var, explained_variance_ratio, s)
self.components_ = components
self.explained_variance_ = ev
self.explained_variance_ratio_ = evr
self.singular_values_ = sv
return X_transformed | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"X",
"=",
"self",
".",
"_check_array",
"(",
"X",
")",
"if",
"self",
".",
"algorithm",
"not",
"in",
"{",
"\"tsqr\"",
",",
"\"randomized\"",
"}",
":",
"raise",
"ValueError"... | Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : Ignored
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array, of the
same type as the input array. If ``X`` was a ``dask.array``, then
``X_new`` will be a ``dask.array`` with the same chunks along the
first dimension. | [
"Fit",
"model",
"to",
"X",
"and",
"perform",
"dimensionality",
"reduction",
"on",
"X",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/truncated_svd.py#L144-L186 | train | 216,158 |
dask/dask-ml | dask_ml/compose/_column_transformer.py | ColumnTransformer._hstack | def _hstack(self, Xs):
"""
Stacks X horizontally.
Supports input types (X): list of
numpy arrays, sparse arrays and DataFrames
"""
types = set(type(X) for X in Xs)
if self.sparse_output_:
return sparse.hstack(Xs).tocsr()
elif dd.Series in types or dd.DataFrame in types:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Concatenating", UserWarning)
return dd.concat(Xs, axis="columns")
elif da.Array in types:
# To allow compatibility with dask core 1.0.0, this is the `else`
# part of the definition of the dask.array.hstack inlined.
# The `then` branch is removed because _validate_output in
# sklearn.compose.ColumnTransformer ensures ndim == 2, so the
# check `all(x.ndim == 1 for x in Xs)` should always fail.
#
# Once dask.array.hstack supports allow_unknown_chunksizes,
# changed this to da.hstack(Xs, allow_unknown_chunksizes=True)
return da.concatenate(Xs, axis=1, allow_unknown_chunksizes=True)
elif self.preserve_dataframe and (pd.Series in types or pd.DataFrame in types):
return pd.concat(Xs, axis="columns")
else:
return np.hstack(Xs) | python | def _hstack(self, Xs):
"""
Stacks X horizontally.
Supports input types (X): list of
numpy arrays, sparse arrays and DataFrames
"""
types = set(type(X) for X in Xs)
if self.sparse_output_:
return sparse.hstack(Xs).tocsr()
elif dd.Series in types or dd.DataFrame in types:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Concatenating", UserWarning)
return dd.concat(Xs, axis="columns")
elif da.Array in types:
# To allow compatibility with dask core 1.0.0, this is the `else`
# part of the definition of the dask.array.hstack inlined.
# The `then` branch is removed because _validate_output in
# sklearn.compose.ColumnTransformer ensures ndim == 2, so the
# check `all(x.ndim == 1 for x in Xs)` should always fail.
#
# Once dask.array.hstack supports allow_unknown_chunksizes,
# changed this to da.hstack(Xs, allow_unknown_chunksizes=True)
return da.concatenate(Xs, axis=1, allow_unknown_chunksizes=True)
elif self.preserve_dataframe and (pd.Series in types or pd.DataFrame in types):
return pd.concat(Xs, axis="columns")
else:
return np.hstack(Xs) | [
"def",
"_hstack",
"(",
"self",
",",
"Xs",
")",
":",
"types",
"=",
"set",
"(",
"type",
"(",
"X",
")",
"for",
"X",
"in",
"Xs",
")",
"if",
"self",
".",
"sparse_output_",
":",
"return",
"sparse",
".",
"hstack",
"(",
"Xs",
")",
".",
"tocsr",
"(",
")... | Stacks X horizontally.
Supports input types (X): list of
numpy arrays, sparse arrays and DataFrames | [
"Stacks",
"X",
"horizontally",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/compose/_column_transformer.py#L172-L200 | train | 216,159 |
dask/dask-ml | dask_ml/decomposition/pca.py | PCA.transform | def transform(self, X):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, ["mean_", "components_"], all_or_any=all)
# X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = da.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed | python | def transform(self, X):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, ["mean_", "components_"], all_or_any=all)
# X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = da.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"[",
"\"mean_\"",
",",
"\"components_\"",
"]",
",",
"all_or_any",
"=",
"all",
")",
"# X = check_array(X)",
"if",
"self",
".",
"mean_",
"is",
"not",
"None",
":",
"X"... | Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components) | [
"Apply",
"dimensionality",
"reduction",
"on",
"X",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L319-L344 | train | 216,160 |
dask/dask-ml | dask_ml/decomposition/pca.py | PCA.fit_transform | def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# X = check_array(X)
U, S, V = self._fit(X)
U = U[:, : self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= np.sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[: self.n_components_]
return U | python | def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# X = check_array(X)
U, S, V = self._fit(X)
U = U[:, : self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= np.sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[: self.n_components_]
return U | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"# X = check_array(X)",
"U",
",",
"S",
",",
"V",
"=",
"self",
".",
"_fit",
"(",
"X",
")",
"U",
"=",
"U",
"[",
":",
",",
":",
"self",
".",
"n_components_",
"]",
"if"... | Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : array-like, shape (n_samples, n_components) | [
"Fit",
"the",
"model",
"with",
"X",
"and",
"apply",
"the",
"dimensionality",
"reduction",
"on",
"X",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L346-L373 | train | 216,161 |
dask/dask-ml | dask_ml/decomposition/pca.py | PCA.inverse_transform | def inverse_transform(self, X):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples in the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, "mean_")
if self.whiten:
return (
da.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return da.dot(X, self.components_) + self.mean_ | python | def inverse_transform(self, X):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples in the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, "mean_")
if self.whiten:
return (
da.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return da.dot(X, self.components_) + self.mean_ | [
"def",
"inverse_transform",
"(",
"self",
",",
"X",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"mean_\"",
")",
"if",
"self",
".",
"whiten",
":",
"return",
"(",
"da",
".",
"dot",
"(",
"X",
",",
"np",
".",
"sqrt",
"(",
"self",
".",
"explained_vari... | Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples in the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform. | [
"Transform",
"data",
"back",
"to",
"its",
"original",
"space",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L375-L406 | train | 216,162 |
dask/dask-ml | dask_ml/decomposition/pca.py | PCA.score_samples | def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
Returns
-------
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, "mean_")
# X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision() # [n_features, n_features]
log_like = -0.5 * (Xr * (da.dot(Xr, precision))).sum(axis=1)
log_like -= 0.5 * (n_features * da.log(2.0 * np.pi) - fast_logdet(precision))
return log_like | python | def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
Returns
-------
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, "mean_")
# X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision() # [n_features, n_features]
log_like = -0.5 * (Xr * (da.dot(Xr, precision))).sum(axis=1)
log_like -= 0.5 * (n_features * da.log(2.0 * np.pi) - fast_logdet(precision))
return log_like | [
"def",
"score_samples",
"(",
"self",
",",
"X",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"\"mean_\"",
")",
"# X = check_array(X)",
"Xr",
"=",
"X",
"-",
"self",
".",
"mean_",
"n_features",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"precision",
"=",
"s... | Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array, shape(n_samples, n_features)
The data.
Returns
-------
ll : array, shape (n_samples,)
Log-likelihood of each sample under the current model | [
"Return",
"the",
"log",
"-",
"likelihood",
"of",
"each",
"sample",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L408-L433 | train | 216,163 |
dask/dask-ml | dask_ml/utils.py | assert_estimator_equal | def assert_estimator_equal(left, right, exclude=None, **kwargs):
"""Check that two Estimators are equal
Parameters
----------
left, right : Estimators
exclude : str or sequence of str
attributes to skip in the check
kwargs : dict
Passed through to the dask `assert_eq` method.
"""
left_attrs = [x for x in dir(left) if x.endswith("_") and not x.startswith("_")]
right_attrs = [x for x in dir(right) if x.endswith("_") and not x.startswith("_")]
if exclude is None:
exclude = set()
elif isinstance(exclude, str):
exclude = {exclude}
else:
exclude = set(exclude)
assert (set(left_attrs) - exclude) == set(right_attrs) - exclude
for attr in set(left_attrs) - exclude:
l = getattr(left, attr)
r = getattr(right, attr)
_assert_eq(l, r, **kwargs) | python | def assert_estimator_equal(left, right, exclude=None, **kwargs):
"""Check that two Estimators are equal
Parameters
----------
left, right : Estimators
exclude : str or sequence of str
attributes to skip in the check
kwargs : dict
Passed through to the dask `assert_eq` method.
"""
left_attrs = [x for x in dir(left) if x.endswith("_") and not x.startswith("_")]
right_attrs = [x for x in dir(right) if x.endswith("_") and not x.startswith("_")]
if exclude is None:
exclude = set()
elif isinstance(exclude, str):
exclude = {exclude}
else:
exclude = set(exclude)
assert (set(left_attrs) - exclude) == set(right_attrs) - exclude
for attr in set(left_attrs) - exclude:
l = getattr(left, attr)
r = getattr(right, attr)
_assert_eq(l, r, **kwargs) | [
"def",
"assert_estimator_equal",
"(",
"left",
",",
"right",
",",
"exclude",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"left_attrs",
"=",
"[",
"x",
"for",
"x",
"in",
"dir",
"(",
"left",
")",
"if",
"x",
".",
"endswith",
"(",
"\"_\"",
")",
"and"... | Check that two Estimators are equal
Parameters
----------
left, right : Estimators
exclude : str or sequence of str
attributes to skip in the check
kwargs : dict
Passed through to the dask `assert_eq` method. | [
"Check",
"that",
"two",
"Estimators",
"are",
"equal"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L58-L84 | train | 216,164 |
dask/dask-ml | dask_ml/utils.py | check_matching_blocks | def check_matching_blocks(*arrays):
"""Check that the partitioning structure for many arrays matches.
Parameters
----------
*arrays : Sequence of array-likes
This includes
* Dask Array
* Dask DataFrame
* Dask Series
"""
if len(arrays) <= 1:
return
if all(isinstance(x, da.Array) for x in arrays):
# TODO: unknown chunks, ensure blocks match, or just raise (configurable)
chunks = arrays[0].chunks
for array in arrays[1:]:
if array.chunks != chunks:
raise ValueError(
"Mismatched chunks. {} != {}".format(chunks, array.chunks)
)
elif all(isinstance(x, (dd.Series, dd.DataFrame)) for x in arrays):
divisions = arrays[0].divisions
for array in arrays[1:]:
if array.divisions != divisions:
raise ValueError(
"Mismatched divisions. {} != {}".format(divisions, array.divisions)
)
else:
raise ValueError("Unexpected types {}.".format({type(x) for x in arrays})) | python | def check_matching_blocks(*arrays):
"""Check that the partitioning structure for many arrays matches.
Parameters
----------
*arrays : Sequence of array-likes
This includes
* Dask Array
* Dask DataFrame
* Dask Series
"""
if len(arrays) <= 1:
return
if all(isinstance(x, da.Array) for x in arrays):
# TODO: unknown chunks, ensure blocks match, or just raise (configurable)
chunks = arrays[0].chunks
for array in arrays[1:]:
if array.chunks != chunks:
raise ValueError(
"Mismatched chunks. {} != {}".format(chunks, array.chunks)
)
elif all(isinstance(x, (dd.Series, dd.DataFrame)) for x in arrays):
divisions = arrays[0].divisions
for array in arrays[1:]:
if array.divisions != divisions:
raise ValueError(
"Mismatched divisions. {} != {}".format(divisions, array.divisions)
)
else:
raise ValueError("Unexpected types {}.".format({type(x) for x in arrays})) | [
"def",
"check_matching_blocks",
"(",
"*",
"arrays",
")",
":",
"if",
"len",
"(",
"arrays",
")",
"<=",
"1",
":",
"return",
"if",
"all",
"(",
"isinstance",
"(",
"x",
",",
"da",
".",
"Array",
")",
"for",
"x",
"in",
"arrays",
")",
":",
"# TODO: unknown ch... | Check that the partitioning structure for many arrays matches.
Parameters
----------
*arrays : Sequence of array-likes
This includes
* Dask Array
* Dask DataFrame
* Dask Series | [
"Check",
"that",
"the",
"partitioning",
"structure",
"for",
"many",
"arrays",
"matches",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L190-L221 | train | 216,165 |
dask/dask-ml | dask_ml/utils.py | check_chunks | def check_chunks(n_samples, n_features, chunks=None):
"""Validate and normalize the chunks argument for a dask.array
Parameters
----------
n_samples, n_features : int
Give the shape of the array
chunks : int, sequence, optional, default None
* For 'chunks=None', this picks a "good" default number of chunks based
on the number of CPU cores. The default results in a block structure
with one block per core along the first dimension (of roughly equal
lengths) and a single block along the second dimension. This may or
may not be appropriate for your use-case. The chunk size will be at
least 100 along the first dimension.
* When chunks is an int, we split the ``n_samples`` into ``chunks``
blocks along the first dimension, and a single block along the
second. Again, the chunksize will be at least 100 along the first
dimension.
* When chunks is a sequence, we validate that it's length two and turn
it into a tuple.
Returns
-------
chunks : tuple
"""
if chunks is None:
chunks = (max(100, n_samples // cpu_count()), n_features)
elif isinstance(chunks, Integral):
chunks = (max(100, n_samples // chunks), n_features)
elif isinstance(chunks, Sequence):
chunks = tuple(chunks)
if len(chunks) != 2:
raise AssertionError("Chunks should be a 2-tuple.")
else:
raise ValueError("Unknown type of chunks: '{}'".format(type(chunks)))
return chunks | python | def check_chunks(n_samples, n_features, chunks=None):
"""Validate and normalize the chunks argument for a dask.array
Parameters
----------
n_samples, n_features : int
Give the shape of the array
chunks : int, sequence, optional, default None
* For 'chunks=None', this picks a "good" default number of chunks based
on the number of CPU cores. The default results in a block structure
with one block per core along the first dimension (of roughly equal
lengths) and a single block along the second dimension. This may or
may not be appropriate for your use-case. The chunk size will be at
least 100 along the first dimension.
* When chunks is an int, we split the ``n_samples`` into ``chunks``
blocks along the first dimension, and a single block along the
second. Again, the chunksize will be at least 100 along the first
dimension.
* When chunks is a sequence, we validate that it's length two and turn
it into a tuple.
Returns
-------
chunks : tuple
"""
if chunks is None:
chunks = (max(100, n_samples // cpu_count()), n_features)
elif isinstance(chunks, Integral):
chunks = (max(100, n_samples // chunks), n_features)
elif isinstance(chunks, Sequence):
chunks = tuple(chunks)
if len(chunks) != 2:
raise AssertionError("Chunks should be a 2-tuple.")
else:
raise ValueError("Unknown type of chunks: '{}'".format(type(chunks)))
return chunks | [
"def",
"check_chunks",
"(",
"n_samples",
",",
"n_features",
",",
"chunks",
"=",
"None",
")",
":",
"if",
"chunks",
"is",
"None",
":",
"chunks",
"=",
"(",
"max",
"(",
"100",
",",
"n_samples",
"//",
"cpu_count",
"(",
")",
")",
",",
"n_features",
")",
"e... | Validate and normalize the chunks argument for a dask.array
Parameters
----------
n_samples, n_features : int
Give the shape of the array
chunks : int, sequence, optional, default None
* For 'chunks=None', this picks a "good" default number of chunks based
on the number of CPU cores. The default results in a block structure
with one block per core along the first dimension (of roughly equal
lengths) and a single block along the second dimension. This may or
may not be appropriate for your use-case. The chunk size will be at
least 100 along the first dimension.
* When chunks is an int, we split the ``n_samples`` into ``chunks``
blocks along the first dimension, and a single block along the
second. Again, the chunksize will be at least 100 along the first
dimension.
* When chunks is a sequence, we validate that it's length two and turn
it into a tuple.
Returns
-------
chunks : tuple | [
"Validate",
"and",
"normalize",
"the",
"chunks",
"argument",
"for",
"a",
"dask",
".",
"array"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L224-L261 | train | 216,166 |
dask/dask-ml | dask_ml/metrics/scorer.py | get_scorer | def get_scorer(scoring, compute=True):
"""Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
"""
# This is the same as sklearns, only we use our SCORERS dict,
# and don't have back-compat code
if isinstance(scoring, six.string_types):
try:
scorer, kwargs = SCORERS[scoring]
except KeyError:
raise ValueError(
"{} is not a valid scoring value. "
"Valid options are {}".format(scoring, sorted(SCORERS))
)
else:
scorer = scoring
kwargs = {}
kwargs["compute"] = compute
return make_scorer(scorer, **kwargs) | python | def get_scorer(scoring, compute=True):
"""Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
"""
# This is the same as sklearns, only we use our SCORERS dict,
# and don't have back-compat code
if isinstance(scoring, six.string_types):
try:
scorer, kwargs = SCORERS[scoring]
except KeyError:
raise ValueError(
"{} is not a valid scoring value. "
"Valid options are {}".format(scoring, sorted(SCORERS))
)
else:
scorer = scoring
kwargs = {}
kwargs["compute"] = compute
return make_scorer(scorer, **kwargs) | [
"def",
"get_scorer",
"(",
"scoring",
",",
"compute",
"=",
"True",
")",
":",
"# This is the same as sklearns, only we use our SCORERS dict,",
"# and don't have back-compat code",
"if",
"isinstance",
"(",
"scoring",
",",
"six",
".",
"string_types",
")",
":",
"try",
":",
... | Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer. | [
"Get",
"a",
"scorer",
"from",
"string"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/metrics/scorer.py#L22-L51 | train | 216,167 |
dask/dask-ml | dask_ml/model_selection/utils.py | to_indexable | def to_indexable(*args, **kwargs):
"""Ensure that all args are an indexable type.
Conversion runs lazily for dask objects, immediately otherwise.
Parameters
----------
args : array_like or scalar
allow_scalars : bool, optional
Whether to allow scalars in args. Default is False.
"""
if kwargs.get("allow_scalars", False):
indexable = _maybe_indexable
else:
indexable = _indexable
for x in args:
if x is None or isinstance(x, (da.Array, dd.DataFrame)):
yield x
elif is_dask_collection(x):
yield delayed(indexable, pure=True)(x)
else:
yield indexable(x) | python | def to_indexable(*args, **kwargs):
"""Ensure that all args are an indexable type.
Conversion runs lazily for dask objects, immediately otherwise.
Parameters
----------
args : array_like or scalar
allow_scalars : bool, optional
Whether to allow scalars in args. Default is False.
"""
if kwargs.get("allow_scalars", False):
indexable = _maybe_indexable
else:
indexable = _indexable
for x in args:
if x is None or isinstance(x, (da.Array, dd.DataFrame)):
yield x
elif is_dask_collection(x):
yield delayed(indexable, pure=True)(x)
else:
yield indexable(x) | [
"def",
"to_indexable",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
".",
"get",
"(",
"\"allow_scalars\"",
",",
"False",
")",
":",
"indexable",
"=",
"_maybe_indexable",
"else",
":",
"indexable",
"=",
"_indexable",
"for",
"x",
"in",... | Ensure that all args are an indexable type.
Conversion runs lazily for dask objects, immediately otherwise.
Parameters
----------
args : array_like or scalar
allow_scalars : bool, optional
Whether to allow scalars in args. Default is False. | [
"Ensure",
"that",
"all",
"args",
"are",
"an",
"indexable",
"type",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/utils.py#L33-L54 | train | 216,168 |
dask/dask-ml | dask_ml/model_selection/utils.py | _index_param_value | def _index_param_value(num_samples, v, indices):
"""Private helper function for parameter value indexing.
This determines whether a fit parameter `v` to a SearchCV.fit
should be indexed along with `X` and `y`. Note that this differs
from the scikit-learn version. They pass `X` and compute num_samples.
We pass `num_samples` instead.
"""
if not _is_arraylike(v) or _num_samples(v) != num_samples:
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices) | python | def _index_param_value(num_samples, v, indices):
"""Private helper function for parameter value indexing.
This determines whether a fit parameter `v` to a SearchCV.fit
should be indexed along with `X` and `y`. Note that this differs
from the scikit-learn version. They pass `X` and compute num_samples.
We pass `num_samples` instead.
"""
if not _is_arraylike(v) or _num_samples(v) != num_samples:
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices) | [
"def",
"_index_param_value",
"(",
"num_samples",
",",
"v",
",",
"indices",
")",
":",
"if",
"not",
"_is_arraylike",
"(",
"v",
")",
"or",
"_num_samples",
"(",
"v",
")",
"!=",
"num_samples",
":",
"# pass through: skip indexing",
"return",
"v",
"if",
"sp",
".",
... | Private helper function for parameter value indexing.
This determines whether a fit parameter `v` to a SearchCV.fit
should be indexed along with `X` and `y`. Note that this differs
from the scikit-learn version. They pass `X` and compute num_samples.
We pass `num_samples` instead. | [
"Private",
"helper",
"function",
"for",
"parameter",
"value",
"indexing",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/utils.py#L57-L70 | train | 216,169 |
dask/dask-ml | dask_ml/model_selection/utils.py | DeprecationDict.add_warning | def add_warning(self, key, *args, **kwargs):
"""Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key
"""
self._deprecations[key] = (args, kwargs) | python | def add_warning(self, key, *args, **kwargs):
"""Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key
"""
self._deprecations[key] = (args, kwargs) | [
"def",
"add_warning",
"(",
"self",
",",
"key",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_deprecations",
"[",
"key",
"]",
"=",
"(",
"args",
",",
"kwargs",
")"
] | Add a warning to be triggered when the specified key is read
Parameters
----------
key : any hashable object
The key | [
"Add",
"a",
"warning",
"to",
"be",
"triggered",
"when",
"the",
"specified",
"key",
"is",
"read"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/utils.py#L141-L149 | train | 216,170 |
dask/dask-ml | dask_ml/preprocessing/label.py | _construct | def _construct(x, categories):
"""Make a sparse matrix from an encoded array.
>>> construct(np.array([0, 1, 0]), np.array([0, 1])).toarray()
array([[1., 0.],
[0., 1.],
[1., 0.]])
"""
# type: (np.ndarray, np.ndarray) -> scipy.sparse.csr_matrix
data = np.ones(len(x))
rows = np.arange(len(x))
columns = x.ravel()
return scipy.sparse.csr_matrix(
(data, (rows, columns)), shape=(len(x), len(categories))
) | python | def _construct(x, categories):
"""Make a sparse matrix from an encoded array.
>>> construct(np.array([0, 1, 0]), np.array([0, 1])).toarray()
array([[1., 0.],
[0., 1.],
[1., 0.]])
"""
# type: (np.ndarray, np.ndarray) -> scipy.sparse.csr_matrix
data = np.ones(len(x))
rows = np.arange(len(x))
columns = x.ravel()
return scipy.sparse.csr_matrix(
(data, (rows, columns)), shape=(len(x), len(categories))
) | [
"def",
"_construct",
"(",
"x",
",",
"categories",
")",
":",
"# type: (np.ndarray, np.ndarray) -> scipy.sparse.csr_matrix",
"data",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"x",
")",
")",
"rows",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
"... | Make a sparse matrix from an encoded array.
>>> construct(np.array([0, 1, 0]), np.array([0, 1])).toarray()
array([[1., 0.],
[0., 1.],
[1., 0.]]) | [
"Make",
"a",
"sparse",
"matrix",
"from",
"an",
"encoded",
"array",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/label.py#L224-L238 | train | 216,171 |
dask/dask-ml | dask_ml/preprocessing/label.py | _encode_dask_array | def _encode_dask_array(values, uniques=None, encode=False, onehot_dtype=None):
"""One-hot or label encode a dask array.
Parameters
----------
values : da.Array, shape [n_samples,]
unqiques : np.ndarray, shape [n_uniques,]
encode : bool, default False
Whether to encode the values (True) or just discover the uniques.
onehot_dtype : np.dtype, optional
Optional dtype for the resulting one-hot encoded array. This changes
the shape, dtype, and underlying storage of the returned dask array.
======= ================= =========================
thing onehot_dtype=None onehot_dtype=onehot_dtype
======= ================= =========================
shape (n_samples,) (n_samples, len(uniques))
dtype np.intp onehot_dtype
storage np.ndarray scipy.sparse.csr_matrix
======= ================= =========================
Returns
-------
uniques : ndarray
The discovered uniques (uniques=None) or just `uniques`
encoded : da.Array, optional
The encoded values. Only returend when ``encode=True``.
"""
if uniques is None:
if encode and onehot_dtype:
raise ValueError("Cannot use 'encode` and 'onehot_dtype' simultaneously.")
if encode:
uniques, encoded = da.unique(values, return_inverse=True)
return uniques, encoded
else:
return da.unique(values)
if encode:
if onehot_dtype:
dtype = onehot_dtype
new_axis = 1
chunks = values.chunks + (len(uniques),)
else:
dtype = np.dtype("int")
new_axis = None
chunks = values.chunks
return (
uniques,
values.map_blocks(
_check_and_search_block,
uniques,
onehot_dtype=onehot_dtype,
dtype=dtype,
new_axis=new_axis,
chunks=chunks,
),
)
else:
return uniques | python | def _encode_dask_array(values, uniques=None, encode=False, onehot_dtype=None):
"""One-hot or label encode a dask array.
Parameters
----------
values : da.Array, shape [n_samples,]
unqiques : np.ndarray, shape [n_uniques,]
encode : bool, default False
Whether to encode the values (True) or just discover the uniques.
onehot_dtype : np.dtype, optional
Optional dtype for the resulting one-hot encoded array. This changes
the shape, dtype, and underlying storage of the returned dask array.
======= ================= =========================
thing onehot_dtype=None onehot_dtype=onehot_dtype
======= ================= =========================
shape (n_samples,) (n_samples, len(uniques))
dtype np.intp onehot_dtype
storage np.ndarray scipy.sparse.csr_matrix
======= ================= =========================
Returns
-------
uniques : ndarray
The discovered uniques (uniques=None) or just `uniques`
encoded : da.Array, optional
The encoded values. Only returend when ``encode=True``.
"""
if uniques is None:
if encode and onehot_dtype:
raise ValueError("Cannot use 'encode` and 'onehot_dtype' simultaneously.")
if encode:
uniques, encoded = da.unique(values, return_inverse=True)
return uniques, encoded
else:
return da.unique(values)
if encode:
if onehot_dtype:
dtype = onehot_dtype
new_axis = 1
chunks = values.chunks + (len(uniques),)
else:
dtype = np.dtype("int")
new_axis = None
chunks = values.chunks
return (
uniques,
values.map_blocks(
_check_and_search_block,
uniques,
onehot_dtype=onehot_dtype,
dtype=dtype,
new_axis=new_axis,
chunks=chunks,
),
)
else:
return uniques | [
"def",
"_encode_dask_array",
"(",
"values",
",",
"uniques",
"=",
"None",
",",
"encode",
"=",
"False",
",",
"onehot_dtype",
"=",
"None",
")",
":",
"if",
"uniques",
"is",
"None",
":",
"if",
"encode",
"and",
"onehot_dtype",
":",
"raise",
"ValueError",
"(",
... | One-hot or label encode a dask array.
Parameters
----------
values : da.Array, shape [n_samples,]
unqiques : np.ndarray, shape [n_uniques,]
encode : bool, default False
Whether to encode the values (True) or just discover the uniques.
onehot_dtype : np.dtype, optional
Optional dtype for the resulting one-hot encoded array. This changes
the shape, dtype, and underlying storage of the returned dask array.
======= ================= =========================
thing onehot_dtype=None onehot_dtype=onehot_dtype
======= ================= =========================
shape (n_samples,) (n_samples, len(uniques))
dtype np.intp onehot_dtype
storage np.ndarray scipy.sparse.csr_matrix
======= ================= =========================
Returns
-------
uniques : ndarray
The discovered uniques (uniques=None) or just `uniques`
encoded : da.Array, optional
The encoded values. Only returend when ``encode=True``. | [
"One",
"-",
"hot",
"or",
"label",
"encode",
"a",
"dask",
"array",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/label.py#L241-L301 | train | 216,172 |
dask/dask-ml | dask_ml/model_selection/methods.py | pipeline | def pipeline(names, steps):
"""Reconstruct a Pipeline from names and steps"""
steps, times = zip(*map(_maybe_timed, steps))
fit_time = sum(times)
if any(s is FIT_FAILURE for s in steps):
fit_est = FIT_FAILURE
else:
fit_est = Pipeline(list(zip(names, steps)))
return fit_est, fit_time | python | def pipeline(names, steps):
"""Reconstruct a Pipeline from names and steps"""
steps, times = zip(*map(_maybe_timed, steps))
fit_time = sum(times)
if any(s is FIT_FAILURE for s in steps):
fit_est = FIT_FAILURE
else:
fit_est = Pipeline(list(zip(names, steps)))
return fit_est, fit_time | [
"def",
"pipeline",
"(",
"names",
",",
"steps",
")",
":",
"steps",
",",
"times",
"=",
"zip",
"(",
"*",
"map",
"(",
"_maybe_timed",
",",
"steps",
")",
")",
"fit_time",
"=",
"sum",
"(",
"times",
")",
"if",
"any",
"(",
"s",
"is",
"FIT_FAILURE",
"for",
... | Reconstruct a Pipeline from names and steps | [
"Reconstruct",
"a",
"Pipeline",
"from",
"names",
"and",
"steps"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/methods.py#L183-L191 | train | 216,173 |
dask/dask-ml | dask_ml/model_selection/methods.py | feature_union | def feature_union(names, steps, weights):
"""Reconstruct a FeatureUnion from names, steps, and weights"""
steps, times = zip(*map(_maybe_timed, steps))
fit_time = sum(times)
if any(s is FIT_FAILURE for s in steps):
fit_est = FIT_FAILURE
else:
fit_est = FeatureUnion(list(zip(names, steps)), transformer_weights=weights)
return fit_est, fit_time | python | def feature_union(names, steps, weights):
"""Reconstruct a FeatureUnion from names, steps, and weights"""
steps, times = zip(*map(_maybe_timed, steps))
fit_time = sum(times)
if any(s is FIT_FAILURE for s in steps):
fit_est = FIT_FAILURE
else:
fit_est = FeatureUnion(list(zip(names, steps)), transformer_weights=weights)
return fit_est, fit_time | [
"def",
"feature_union",
"(",
"names",
",",
"steps",
",",
"weights",
")",
":",
"steps",
",",
"times",
"=",
"zip",
"(",
"*",
"map",
"(",
"_maybe_timed",
",",
"steps",
")",
")",
"fit_time",
"=",
"sum",
"(",
"times",
")",
"if",
"any",
"(",
"s",
"is",
... | Reconstruct a FeatureUnion from names, steps, and weights | [
"Reconstruct",
"a",
"FeatureUnion",
"from",
"names",
"steps",
"and",
"weights"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/methods.py#L194-L202 | train | 216,174 |
dask/dask-ml | dask_ml/model_selection/methods.py | feature_union_concat | def feature_union_concat(Xs, nsamples, weights):
"""Apply weights and concatenate outputs from a FeatureUnion"""
if any(x is FIT_FAILURE for x in Xs):
return FIT_FAILURE
Xs = [X if w is None else X * w for X, w in zip(Xs, weights) if X is not None]
if not Xs:
return np.zeros((nsamples, 0))
if any(sparse.issparse(f) for f in Xs):
return sparse.hstack(Xs).tocsr()
return np.hstack(Xs) | python | def feature_union_concat(Xs, nsamples, weights):
"""Apply weights and concatenate outputs from a FeatureUnion"""
if any(x is FIT_FAILURE for x in Xs):
return FIT_FAILURE
Xs = [X if w is None else X * w for X, w in zip(Xs, weights) if X is not None]
if not Xs:
return np.zeros((nsamples, 0))
if any(sparse.issparse(f) for f in Xs):
return sparse.hstack(Xs).tocsr()
return np.hstack(Xs) | [
"def",
"feature_union_concat",
"(",
"Xs",
",",
"nsamples",
",",
"weights",
")",
":",
"if",
"any",
"(",
"x",
"is",
"FIT_FAILURE",
"for",
"x",
"in",
"Xs",
")",
":",
"return",
"FIT_FAILURE",
"Xs",
"=",
"[",
"X",
"if",
"w",
"is",
"None",
"else",
"X",
"... | Apply weights and concatenate outputs from a FeatureUnion | [
"Apply",
"weights",
"and",
"concatenate",
"outputs",
"from",
"a",
"FeatureUnion"
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/methods.py#L205-L214 | train | 216,175 |
dask/dask-ml | dask_ml/model_selection/_split.py | _generate_idx | def _generate_idx(n, seed, n_train, n_test):
"""Generate train, test indices for a length-n array.
Parameters
----------
n : int
The length of the array
seed : int
Seed for a RandomState
n_train, n_test : int, 0 < n_train, n_test < n
Number of samples to use for the train or
test index.
Notes
-----
"""
idx = check_random_state(seed).permutation(n)
ind_test = idx[:n_test]
ind_train = idx[n_test : n_train + n_test]
return ind_train, ind_test | python | def _generate_idx(n, seed, n_train, n_test):
"""Generate train, test indices for a length-n array.
Parameters
----------
n : int
The length of the array
seed : int
Seed for a RandomState
n_train, n_test : int, 0 < n_train, n_test < n
Number of samples to use for the train or
test index.
Notes
-----
"""
idx = check_random_state(seed).permutation(n)
ind_test = idx[:n_test]
ind_train = idx[n_test : n_train + n_test]
return ind_train, ind_test | [
"def",
"_generate_idx",
"(",
"n",
",",
"seed",
",",
"n_train",
",",
"n_test",
")",
":",
"idx",
"=",
"check_random_state",
"(",
"seed",
")",
".",
"permutation",
"(",
"n",
")",
"ind_test",
"=",
"idx",
"[",
":",
"n_test",
"]",
"ind_train",
"=",
"idx",
"... | Generate train, test indices for a length-n array.
Parameters
----------
n : int
The length of the array
seed : int
Seed for a RandomState
n_train, n_test : int, 0 < n_train, n_test < n
Number of samples to use for the train or
test index.
Notes
----- | [
"Generate",
"train",
"test",
"indices",
"for",
"a",
"length",
"-",
"n",
"array",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_split.py#L67-L87 | train | 216,176 |
dask/dask-ml | dask_ml/model_selection/_split.py | _blockwise_slice | def _blockwise_slice(arr, idx):
"""Slice an array that is blockwise-aligned with idx.
Parameters
----------
arr : Dask array
idx : Dask array
Should have the following properties
* Same blocks as `arr` along the first dimension
* Contains only integers
* Each block's values should be between ``[0, len(block))``
Returns
-------
sliced : dask.Array
"""
objs = []
offsets = np.hstack([0, np.cumsum(arr.chunks[0])[:-1]])
for i, (x, idx2) in enumerate(
zip(arr.to_delayed().ravel(), idx.to_delayed().ravel())
):
idx3 = idx2 - offsets[i]
objs.append(x[idx3])
shapes = idx.chunks[0]
if arr.ndim == 2:
P = arr.shape[1]
shapes = [(x, P) for x in shapes]
else:
shapes = [(x,) for x in shapes]
sliced = da.concatenate(
[
da.from_delayed(x, shape=shape, dtype=arr.dtype)
for x, shape in zip(objs, shapes)
]
)
return sliced | python | def _blockwise_slice(arr, idx):
"""Slice an array that is blockwise-aligned with idx.
Parameters
----------
arr : Dask array
idx : Dask array
Should have the following properties
* Same blocks as `arr` along the first dimension
* Contains only integers
* Each block's values should be between ``[0, len(block))``
Returns
-------
sliced : dask.Array
"""
objs = []
offsets = np.hstack([0, np.cumsum(arr.chunks[0])[:-1]])
for i, (x, idx2) in enumerate(
zip(arr.to_delayed().ravel(), idx.to_delayed().ravel())
):
idx3 = idx2 - offsets[i]
objs.append(x[idx3])
shapes = idx.chunks[0]
if arr.ndim == 2:
P = arr.shape[1]
shapes = [(x, P) for x in shapes]
else:
shapes = [(x,) for x in shapes]
sliced = da.concatenate(
[
da.from_delayed(x, shape=shape, dtype=arr.dtype)
for x, shape in zip(objs, shapes)
]
)
return sliced | [
"def",
"_blockwise_slice",
"(",
"arr",
",",
"idx",
")",
":",
"objs",
"=",
"[",
"]",
"offsets",
"=",
"np",
".",
"hstack",
"(",
"[",
"0",
",",
"np",
".",
"cumsum",
"(",
"arr",
".",
"chunks",
"[",
"0",
"]",
")",
"[",
":",
"-",
"1",
"]",
"]",
"... | Slice an array that is blockwise-aligned with idx.
Parameters
----------
arr : Dask array
idx : Dask array
Should have the following properties
* Same blocks as `arr` along the first dimension
* Contains only integers
* Each block's values should be between ``[0, len(block))``
Returns
-------
sliced : dask.Array | [
"Slice",
"an",
"array",
"that",
"is",
"blockwise",
"-",
"aligned",
"with",
"idx",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_split.py#L317-L356 | train | 216,177 |
dask/dask-ml | dask_ml/feature_extraction/text.py | HashingVectorizer.transform | def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Transformation is done in parallel, and correctly handles dask
collections.
Parameters
----------
X : dask.Bag of raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : dask.array.Array, shape = (n_samples, self.n_features)
Document-term matrix. Each block of the array is a scipy sparse
matrix.
Notes
-----
The returned dask Array is composed scipy sparse matricies. If you need
to compute on the result immediately, you may need to convert the individual
blocks to ndarrays or pydata/sparse matricies.
>>> import sparse
>>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP
See the :doc:`examples/text-vectorization` for more.
"""
msg = "'X' should be a 1-dimensional array with length 'num_samples'."
if not dask.is_dask_collection(X):
return super(HashingVectorizer, self).transform(X)
if isinstance(X, db.Bag):
bag2 = X.map_partitions(_transform, estimator=self)
objs = bag2.to_delayed()
arrs = [
da.from_delayed(obj, (np.nan, self.n_features), self.dtype)
for obj in objs
]
result = da.concatenate(arrs, axis=0)
elif isinstance(X, dd.Series):
result = X.map_partitions(_transform, self)
elif isinstance(X, da.Array):
# dask.Array
chunks = ((np.nan,) * X.numblocks[0], (self.n_features,))
if X.ndim == 1:
result = X.map_blocks(
_transform, estimator=self, dtype="f8", chunks=chunks, new_axis=1
)
else:
raise ValueError(msg)
else:
raise ValueError(msg)
return result | python | def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Transformation is done in parallel, and correctly handles dask
collections.
Parameters
----------
X : dask.Bag of raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : dask.array.Array, shape = (n_samples, self.n_features)
Document-term matrix. Each block of the array is a scipy sparse
matrix.
Notes
-----
The returned dask Array is composed scipy sparse matricies. If you need
to compute on the result immediately, you may need to convert the individual
blocks to ndarrays or pydata/sparse matricies.
>>> import sparse
>>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP
See the :doc:`examples/text-vectorization` for more.
"""
msg = "'X' should be a 1-dimensional array with length 'num_samples'."
if not dask.is_dask_collection(X):
return super(HashingVectorizer, self).transform(X)
if isinstance(X, db.Bag):
bag2 = X.map_partitions(_transform, estimator=self)
objs = bag2.to_delayed()
arrs = [
da.from_delayed(obj, (np.nan, self.n_features), self.dtype)
for obj in objs
]
result = da.concatenate(arrs, axis=0)
elif isinstance(X, dd.Series):
result = X.map_partitions(_transform, self)
elif isinstance(X, da.Array):
# dask.Array
chunks = ((np.nan,) * X.numblocks[0], (self.n_features,))
if X.ndim == 1:
result = X.map_blocks(
_transform, estimator=self, dtype="f8", chunks=chunks, new_axis=1
)
else:
raise ValueError(msg)
else:
raise ValueError(msg)
return result | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"msg",
"=",
"\"'X' should be a 1-dimensional array with length 'num_samples'.\"",
"if",
"not",
"dask",
".",
"is_dask_collection",
"(",
"X",
")",
":",
"return",
"super",
"(",
"HashingVectorizer",
",",
"self",
")"... | Transform a sequence of documents to a document-term matrix.
Transformation is done in parallel, and correctly handles dask
collections.
Parameters
----------
X : dask.Bag of raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : dask.array.Array, shape = (n_samples, self.n_features)
Document-term matrix. Each block of the array is a scipy sparse
matrix.
Notes
-----
The returned dask Array is composed scipy sparse matricies. If you need
to compute on the result immediately, you may need to convert the individual
blocks to ndarrays or pydata/sparse matricies.
>>> import sparse
>>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP
See the :doc:`examples/text-vectorization` for more. | [
"Transform",
"a",
"sequence",
"of",
"documents",
"to",
"a",
"document",
"-",
"term",
"matrix",
"."
] | cc4837c2c2101f9302cac38354b55754263cd1f3 | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/feature_extraction/text.py#L10-L67 | train | 216,178 |
MIT-LCP/wfdb-python | wfdb/processing/peaks.py | correct_peaks | def correct_peaks(sig, peak_inds, search_radius, smooth_window_size,
peak_dir='compare'):
"""
Adjust a set of detected peaks to coincide with local signal maxima,
and
Parameters
----------
sig : numpy array
The 1d signal array
peak_inds : np array
Array of the original peak indices
max_gap : int
The radius within which the original peaks may be shifted.
smooth_window_size : int
The window size of the moving average filter applied on the
signal. Peak distance is calculated on the difference between
the original and smoothed signal.
peak_dir : str, optional
The expected peak direction: 'up' or 'down', 'both', or
'compare'.
- If 'up', the peaks will be shifted to local maxima
- If 'down', the peaks will be shifted to local minima
- If 'both', the peaks will be shifted to local maxima of the
rectified signal
- If 'compare', the function will try both 'up' and 'down'
options, and choose the direction that gives the largest mean
distance from the smoothed signal.
Returns
-------
corrected_peak_inds : numpy array
Array of the corrected peak indices
Examples
--------
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# Subtract the smoothed signal from the original
sig = sig - smooth(sig=sig, window_size=smooth_window_size)
# Shift peaks to local maxima
if peak_dir == 'up':
shifted_peak_inds = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
elif peak_dir == 'down':
shifted_peak_inds = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=False)
elif peak_dir == 'both':
shifted_peak_inds = shift_peaks(sig=np.abs(sig),
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
else:
shifted_peak_inds_up = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
shifted_peak_inds_down = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=False)
# Choose the direction with the biggest deviation
up_dist = np.mean(np.abs(sig[shifted_peak_inds_up]))
down_dist = np.mean(np.abs(sig[shifted_peak_inds_down]))
if up_dist >= down_dist:
shifted_peak_inds = shifted_peak_inds_up
else:
shifted_peak_inds = shifted_peak_inds_down
return shifted_peak_inds | python | def correct_peaks(sig, peak_inds, search_radius, smooth_window_size,
peak_dir='compare'):
"""
Adjust a set of detected peaks to coincide with local signal maxima,
and
Parameters
----------
sig : numpy array
The 1d signal array
peak_inds : np array
Array of the original peak indices
max_gap : int
The radius within which the original peaks may be shifted.
smooth_window_size : int
The window size of the moving average filter applied on the
signal. Peak distance is calculated on the difference between
the original and smoothed signal.
peak_dir : str, optional
The expected peak direction: 'up' or 'down', 'both', or
'compare'.
- If 'up', the peaks will be shifted to local maxima
- If 'down', the peaks will be shifted to local minima
- If 'both', the peaks will be shifted to local maxima of the
rectified signal
- If 'compare', the function will try both 'up' and 'down'
options, and choose the direction that gives the largest mean
distance from the smoothed signal.
Returns
-------
corrected_peak_inds : numpy array
Array of the corrected peak indices
Examples
--------
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# Subtract the smoothed signal from the original
sig = sig - smooth(sig=sig, window_size=smooth_window_size)
# Shift peaks to local maxima
if peak_dir == 'up':
shifted_peak_inds = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
elif peak_dir == 'down':
shifted_peak_inds = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=False)
elif peak_dir == 'both':
shifted_peak_inds = shift_peaks(sig=np.abs(sig),
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
else:
shifted_peak_inds_up = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
shifted_peak_inds_down = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=False)
# Choose the direction with the biggest deviation
up_dist = np.mean(np.abs(sig[shifted_peak_inds_up]))
down_dist = np.mean(np.abs(sig[shifted_peak_inds_down]))
if up_dist >= down_dist:
shifted_peak_inds = shifted_peak_inds_up
else:
shifted_peak_inds = shifted_peak_inds_down
return shifted_peak_inds | [
"def",
"correct_peaks",
"(",
"sig",
",",
"peak_inds",
",",
"search_radius",
",",
"smooth_window_size",
",",
"peak_dir",
"=",
"'compare'",
")",
":",
"sig_len",
"=",
"sig",
".",
"shape",
"[",
"0",
"]",
"n_peaks",
"=",
"len",
"(",
"peak_inds",
")",
"# Subtrac... | Adjust a set of detected peaks to coincide with local signal maxima,
and
Parameters
----------
sig : numpy array
The 1d signal array
peak_inds : np array
Array of the original peak indices
max_gap : int
The radius within which the original peaks may be shifted.
smooth_window_size : int
The window size of the moving average filter applied on the
signal. Peak distance is calculated on the difference between
the original and smoothed signal.
peak_dir : str, optional
The expected peak direction: 'up' or 'down', 'both', or
'compare'.
- If 'up', the peaks will be shifted to local maxima
- If 'down', the peaks will be shifted to local minima
- If 'both', the peaks will be shifted to local maxima of the
rectified signal
- If 'compare', the function will try both 'up' and 'down'
options, and choose the direction that gives the largest mean
distance from the smoothed signal.
Returns
-------
corrected_peak_inds : numpy array
Array of the corrected peak indices
Examples
-------- | [
"Adjust",
"a",
"set",
"of",
"detected",
"peaks",
"to",
"coincide",
"with",
"local",
"signal",
"maxima",
"and"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/peaks.py#L106-L188 | train | 216,179 |
MIT-LCP/wfdb-python | wfdb/processing/peaks.py | shift_peaks | def shift_peaks(sig, peak_inds, search_radius, peak_up):
"""
Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# The indices to shift each peak ind by
shift_inds = np.zeros(n_peaks, dtype='int')
# Iterate through peaks
for i in range(n_peaks):
ind = peak_inds[i]
local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)]
if peak_up:
shift_inds[i] = np.argmax(local_sig)
else:
shift_inds[i] = np.argmin(local_sig)
# May have to adjust early values
for i in range(n_peaks):
ind = peak_inds[i]
if ind >= search_radius:
break
shift_inds[i] -= search_radius - ind
shifted_peak_inds = peak_inds + shift_inds - search_radius
return shifted_peak_inds | python | def shift_peaks(sig, peak_inds, search_radius, peak_up):
"""
Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# The indices to shift each peak ind by
shift_inds = np.zeros(n_peaks, dtype='int')
# Iterate through peaks
for i in range(n_peaks):
ind = peak_inds[i]
local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)]
if peak_up:
shift_inds[i] = np.argmax(local_sig)
else:
shift_inds[i] = np.argmin(local_sig)
# May have to adjust early values
for i in range(n_peaks):
ind = peak_inds[i]
if ind >= search_radius:
break
shift_inds[i] -= search_radius - ind
shifted_peak_inds = peak_inds + shift_inds - search_radius
return shifted_peak_inds | [
"def",
"shift_peaks",
"(",
"sig",
",",
"peak_inds",
",",
"search_radius",
",",
"peak_up",
")",
":",
"sig_len",
"=",
"sig",
".",
"shape",
"[",
"0",
"]",
"n_peaks",
"=",
"len",
"(",
"peak_inds",
")",
"# The indices to shift each peak ind by",
"shift_inds",
"=",
... | Helper function for correct_peaks. Return the shifted peaks to local
maxima or minima within a radius.
peak_up : bool
Whether the expected peak direction is up | [
"Helper",
"function",
"for",
"correct_peaks",
".",
"Return",
"the",
"shifted",
"peaks",
"to",
"local",
"maxima",
"or",
"minima",
"within",
"a",
"radius",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/peaks.py#L191-L223 | train | 216,180 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | get_plot_dims | def get_plot_dims(signal, ann_samp):
"Figure out the number of plot channels"
if signal is not None:
if signal.ndim == 1:
sig_len = len(signal)
n_sig = 1
else:
sig_len = signal.shape[0]
n_sig = signal.shape[1]
else:
sig_len = 0
n_sig = 0
if ann_samp is not None:
n_annot = len(ann_samp)
else:
n_annot = 0
return sig_len, n_sig, n_annot, max(n_sig, n_annot) | python | def get_plot_dims(signal, ann_samp):
"Figure out the number of plot channels"
if signal is not None:
if signal.ndim == 1:
sig_len = len(signal)
n_sig = 1
else:
sig_len = signal.shape[0]
n_sig = signal.shape[1]
else:
sig_len = 0
n_sig = 0
if ann_samp is not None:
n_annot = len(ann_samp)
else:
n_annot = 0
return sig_len, n_sig, n_annot, max(n_sig, n_annot) | [
"def",
"get_plot_dims",
"(",
"signal",
",",
"ann_samp",
")",
":",
"if",
"signal",
"is",
"not",
"None",
":",
"if",
"signal",
".",
"ndim",
"==",
"1",
":",
"sig_len",
"=",
"len",
"(",
"signal",
")",
"n_sig",
"=",
"1",
"else",
":",
"sig_len",
"=",
"sig... | Figure out the number of plot channels | [
"Figure",
"out",
"the",
"number",
"of",
"plot",
"channels"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L125-L143 | train | 216,181 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | create_figure | def create_figure(n_subplots, figsize):
"Create the plot figure and subplot axes"
fig = plt.figure(figsize=figsize)
axes = []
for i in range(n_subplots):
axes.append(fig.add_subplot(n_subplots, 1, i+1))
return fig, axes | python | def create_figure(n_subplots, figsize):
"Create the plot figure and subplot axes"
fig = plt.figure(figsize=figsize)
axes = []
for i in range(n_subplots):
axes.append(fig.add_subplot(n_subplots, 1, i+1))
return fig, axes | [
"def",
"create_figure",
"(",
"n_subplots",
",",
"figsize",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"axes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_subplots",
")",
":",
"axes",
".",
"append",
"(",
"... | Create the plot figure and subplot axes | [
"Create",
"the",
"plot",
"figure",
"and",
"subplot",
"axes"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L146-L154 | train | 216,182 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | plot_signal | def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
"Plot signal channels"
# Extend signal style if necesary
if len(sig_style) == 1:
sig_style = n_sig * sig_style
# Figure out time indices
if time_units == 'samples':
t = np.linspace(0, sig_len-1, sig_len)
else:
downsample_factor = {'seconds':fs, 'minutes':fs * 60,
'hours':fs * 3600}
t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units]
# Plot the signals
if signal.ndim == 1:
axes[0].plot(t, signal, sig_style[0], zorder=3)
else:
for ch in range(n_sig):
axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3) | python | def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
"Plot signal channels"
# Extend signal style if necesary
if len(sig_style) == 1:
sig_style = n_sig * sig_style
# Figure out time indices
if time_units == 'samples':
t = np.linspace(0, sig_len-1, sig_len)
else:
downsample_factor = {'seconds':fs, 'minutes':fs * 60,
'hours':fs * 3600}
t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units]
# Plot the signals
if signal.ndim == 1:
axes[0].plot(t, signal, sig_style[0], zorder=3)
else:
for ch in range(n_sig):
axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3) | [
"def",
"plot_signal",
"(",
"signal",
",",
"sig_len",
",",
"n_sig",
",",
"fs",
",",
"time_units",
",",
"sig_style",
",",
"axes",
")",
":",
"# Extend signal style if necesary",
"if",
"len",
"(",
"sig_style",
")",
"==",
"1",
":",
"sig_style",
"=",
"n_sig",
"*... | Plot signal channels | [
"Plot",
"signal",
"channels"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L157-L177 | train | 216,183 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | plot_annotation | def plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units,
ann_style, axes):
"Plot annotations, possibly overlaid on signals"
# Extend annotation style if necesary
if len(ann_style) == 1:
ann_style = n_annot * ann_style
# Figure out downsample factor for time indices
if time_units == 'samples':
downsample_factor = 1
else:
downsample_factor = {'seconds':float(fs), 'minutes':float(fs)*60,
'hours':float(fs)*3600}[time_units]
# Plot the annotations
for ch in range(n_annot):
if ann_samp[ch] is not None and len(ann_samp[ch]):
# Figure out the y values to plot on a channel basis
# 1 dimensional signals
if n_sig > ch:
if signal.ndim == 1:
y = signal[ann_samp[ch]]
else:
y = signal[ann_samp[ch], ch]
else:
y = np.zeros(len(ann_samp[ch]))
axes[ch].plot(ann_samp[ch] / downsample_factor, y, ann_style[ch])
# Plot the annotation symbols if any
if ann_sym is not None and ann_sym[ch] is not None:
for i, s in enumerate(ann_sym[ch]):
axes[ch].annotate(s, (ann_samp[ch][i] / downsample_factor,
y[i])) | python | def plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units,
ann_style, axes):
"Plot annotations, possibly overlaid on signals"
# Extend annotation style if necesary
if len(ann_style) == 1:
ann_style = n_annot * ann_style
# Figure out downsample factor for time indices
if time_units == 'samples':
downsample_factor = 1
else:
downsample_factor = {'seconds':float(fs), 'minutes':float(fs)*60,
'hours':float(fs)*3600}[time_units]
# Plot the annotations
for ch in range(n_annot):
if ann_samp[ch] is not None and len(ann_samp[ch]):
# Figure out the y values to plot on a channel basis
# 1 dimensional signals
if n_sig > ch:
if signal.ndim == 1:
y = signal[ann_samp[ch]]
else:
y = signal[ann_samp[ch], ch]
else:
y = np.zeros(len(ann_samp[ch]))
axes[ch].plot(ann_samp[ch] / downsample_factor, y, ann_style[ch])
# Plot the annotation symbols if any
if ann_sym is not None and ann_sym[ch] is not None:
for i, s in enumerate(ann_sym[ch]):
axes[ch].annotate(s, (ann_samp[ch][i] / downsample_factor,
y[i])) | [
"def",
"plot_annotation",
"(",
"ann_samp",
",",
"n_annot",
",",
"ann_sym",
",",
"signal",
",",
"n_sig",
",",
"fs",
",",
"time_units",
",",
"ann_style",
",",
"axes",
")",
":",
"# Extend annotation style if necesary",
"if",
"len",
"(",
"ann_style",
")",
"==",
... | Plot annotations, possibly overlaid on signals | [
"Plot",
"annotations",
"possibly",
"overlaid",
"on",
"signals"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L180-L214 | train | 216,184 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | plot_ecg_grids | def plot_ecg_grids(ecg_grids, fs, units, time_units, axes):
"Add ecg grids to the axes"
if ecg_grids == 'all':
ecg_grids = range(0, len(axes))
for ch in ecg_grids:
# Get the initial plot limits
auto_xlims = axes[ch].get_xlim()
auto_ylims= axes[ch].get_ylim()
(major_ticks_x, minor_ticks_x, major_ticks_y,
minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1],
units[ch], fs, auto_xlims[1],
time_units)
min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x)
min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y)
for tick in minor_ticks_x:
axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed',
marker='|', zorder=1)
for tick in major_ticks_x:
axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa',
marker='|', zorder=2)
for tick in minor_ticks_y:
axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed',
marker='_', zorder=1)
for tick in major_ticks_y:
axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa',
marker='_', zorder=2)
# Plotting the lines changes the graph. Set the limits back
axes[ch].set_xlim(auto_xlims)
axes[ch].set_ylim(auto_ylims) | python | def plot_ecg_grids(ecg_grids, fs, units, time_units, axes):
"Add ecg grids to the axes"
if ecg_grids == 'all':
ecg_grids = range(0, len(axes))
for ch in ecg_grids:
# Get the initial plot limits
auto_xlims = axes[ch].get_xlim()
auto_ylims= axes[ch].get_ylim()
(major_ticks_x, minor_ticks_x, major_ticks_y,
minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1],
units[ch], fs, auto_xlims[1],
time_units)
min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x)
min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y)
for tick in minor_ticks_x:
axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed',
marker='|', zorder=1)
for tick in major_ticks_x:
axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa',
marker='|', zorder=2)
for tick in minor_ticks_y:
axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed',
marker='_', zorder=1)
for tick in major_ticks_y:
axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa',
marker='_', zorder=2)
# Plotting the lines changes the graph. Set the limits back
axes[ch].set_xlim(auto_xlims)
axes[ch].set_ylim(auto_ylims) | [
"def",
"plot_ecg_grids",
"(",
"ecg_grids",
",",
"fs",
",",
"units",
",",
"time_units",
",",
"axes",
")",
":",
"if",
"ecg_grids",
"==",
"'all'",
":",
"ecg_grids",
"=",
"range",
"(",
"0",
",",
"len",
"(",
"axes",
")",
")",
"for",
"ch",
"in",
"ecg_grids... | Add ecg grids to the axes | [
"Add",
"ecg",
"grids",
"to",
"the",
"axes"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L217-L251 | train | 216,185 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | calc_ecg_grids | def calc_ecg_grids(minsig, maxsig, sig_units, fs, maxt, time_units):
"""
Calculate tick intervals for ecg grids
- 5mm 0.2s major grids, 0.04s minor grids
- 0.5mV major grids, 0.125 minor grids
10 mm is equal to 1mV in voltage.
"""
# Get the grid interval of the x axis
if time_units == 'samples':
majorx = 0.2 * fs
minorx = 0.04 * fs
elif time_units == 'seconds':
majorx = 0.2
minorx = 0.04
elif time_units == 'minutes':
majorx = 0.2 / 60
minorx = 0.04/60
elif time_units == 'hours':
majorx = 0.2 / 3600
minorx = 0.04 / 3600
# Get the grid interval of the y axis
if sig_units.lower()=='uv':
majory = 500
minory = 125
elif sig_units.lower()=='mv':
majory = 0.5
minory = 0.125
elif sig_units.lower()=='v':
majory = 0.0005
minory = 0.000125
else:
raise ValueError('Signal units must be uV, mV, or V to plot ECG grids.')
major_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, majorx)
minor_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, minorx)
major_ticks_y = np.arange(downround(minsig, majory),
upround(maxsig, majory) + 0.0001, majory)
minor_ticks_y = np.arange(downround(minsig, majory),
upround(maxsig, majory) + 0.0001, minory)
return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y) | python | def calc_ecg_grids(minsig, maxsig, sig_units, fs, maxt, time_units):
"""
Calculate tick intervals for ecg grids
- 5mm 0.2s major grids, 0.04s minor grids
- 0.5mV major grids, 0.125 minor grids
10 mm is equal to 1mV in voltage.
"""
# Get the grid interval of the x axis
if time_units == 'samples':
majorx = 0.2 * fs
minorx = 0.04 * fs
elif time_units == 'seconds':
majorx = 0.2
minorx = 0.04
elif time_units == 'minutes':
majorx = 0.2 / 60
minorx = 0.04/60
elif time_units == 'hours':
majorx = 0.2 / 3600
minorx = 0.04 / 3600
# Get the grid interval of the y axis
if sig_units.lower()=='uv':
majory = 500
minory = 125
elif sig_units.lower()=='mv':
majory = 0.5
minory = 0.125
elif sig_units.lower()=='v':
majory = 0.0005
minory = 0.000125
else:
raise ValueError('Signal units must be uV, mV, or V to plot ECG grids.')
major_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, majorx)
minor_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, minorx)
major_ticks_y = np.arange(downround(minsig, majory),
upround(maxsig, majory) + 0.0001, majory)
minor_ticks_y = np.arange(downround(minsig, majory),
upround(maxsig, majory) + 0.0001, minory)
return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y) | [
"def",
"calc_ecg_grids",
"(",
"minsig",
",",
"maxsig",
",",
"sig_units",
",",
"fs",
",",
"maxt",
",",
"time_units",
")",
":",
"# Get the grid interval of the x axis",
"if",
"time_units",
"==",
"'samples'",
":",
"majorx",
"=",
"0.2",
"*",
"fs",
"minorx",
"=",
... | Calculate tick intervals for ecg grids
- 5mm 0.2s major grids, 0.04s minor grids
- 0.5mV major grids, 0.125 minor grids
10 mm is equal to 1mV in voltage. | [
"Calculate",
"tick",
"intervals",
"for",
"ecg",
"grids"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L253-L297 | train | 216,186 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | label_figure | def label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel,
title):
"Add title, and axes labels"
if title:
axes[0].set_title(title)
# Determine y label
# Explicit labels take precedence if present. Otherwise, construct labels
# using signal names and units
if not ylabel:
ylabel = []
# Set default channel and signal names if needed
if not sig_name:
sig_name = ['ch_'+str(i) for i in range(n_subplots)]
if not sig_units:
sig_units = n_subplots * ['NU']
ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
# If there are annotations with channels outside of signal range
# put placeholders
n_missing_labels = n_subplots - len(ylabel)
if n_missing_labels:
ylabel = ylabel + ['ch_%d/NU' % i for i in range(len(ylabel),
n_subplots)]
for ch in range(n_subplots):
axes[ch].set_ylabel(ylabel[ch])
axes[-1].set_xlabel('/'.join(['time', time_units[:-1]])) | python | def label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel,
title):
"Add title, and axes labels"
if title:
axes[0].set_title(title)
# Determine y label
# Explicit labels take precedence if present. Otherwise, construct labels
# using signal names and units
if not ylabel:
ylabel = []
# Set default channel and signal names if needed
if not sig_name:
sig_name = ['ch_'+str(i) for i in range(n_subplots)]
if not sig_units:
sig_units = n_subplots * ['NU']
ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
# If there are annotations with channels outside of signal range
# put placeholders
n_missing_labels = n_subplots - len(ylabel)
if n_missing_labels:
ylabel = ylabel + ['ch_%d/NU' % i for i in range(len(ylabel),
n_subplots)]
for ch in range(n_subplots):
axes[ch].set_ylabel(ylabel[ch])
axes[-1].set_xlabel('/'.join(['time', time_units[:-1]])) | [
"def",
"label_figure",
"(",
"axes",
",",
"n_subplots",
",",
"time_units",
",",
"sig_name",
",",
"sig_units",
",",
"ylabel",
",",
"title",
")",
":",
"if",
"title",
":",
"axes",
"[",
"0",
"]",
".",
"set_title",
"(",
"title",
")",
"# Determine y label",
"# ... | Add title, and axes labels | [
"Add",
"title",
"and",
"axes",
"labels"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L300-L329 | train | 216,187 |
MIT-LCP/wfdb-python | wfdb/plot/plot.py | get_wfdb_plot_items | def get_wfdb_plot_items(record, annotation, plot_sym):
"""
Get items to plot from wfdb objects
"""
# Get record attributes
if record:
if record.p_signal is not None:
signal = record.p_signal
elif record.d_signal is not None:
signal = record.d_signal
else:
raise ValueError('The record has no signal to plot')
fs = record.fs
sig_name = record.sig_name
sig_units = record.units
record_name = 'Record: %s' % record.record_name
ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
else:
signal = fs = ylabel = record_name = None
# Get annotation attributes
if annotation:
# Get channels
ann_chans = set(annotation.chan)
n_ann_chans = max(ann_chans) + 1
# Indices for each channel
chan_inds = n_ann_chans * [np.empty(0, dtype='int')]
for chan in ann_chans:
chan_inds[chan] = np.where(annotation.chan == chan)[0]
ann_samp = [annotation.sample[ci] for ci in chan_inds]
if plot_sym:
ann_sym = n_ann_chans * [None]
for ch in ann_chans:
ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]]
else:
ann_sym = None
# Try to get fs from annotation if not already in record
if fs is None:
fs = annotation.fs
record_name = record_name or annotation.record_name
else:
ann_samp = None
ann_sym = None
# Cleaning: remove empty channels and set labels and styles.
# Wrangle together the signal and annotation channels if necessary
if record and annotation:
# There may be instances in which the annotation `chan`
# attribute has non-overlapping channels with the signal.
# In this case, omit empty middle channels. This function should
# already process labels and arrangements before passing into
# `plot_items`
sig_chans = set(range(signal.shape[1]))
all_chans = sorted(sig_chans.union(ann_chans))
# Need to update ylabels and annotation values
if sig_chans != all_chans:
compact_ann_samp = []
if plot_sym:
compact_ann_sym = []
else:
compact_ann_sym = None
ylabel = []
for ch in all_chans: # ie. 0, 1, 9
if ch in ann_chans:
compact_ann_samp.append(ann_samp[ch])
if plot_sym:
compact_ann_sym.append(ann_sym[ch])
if ch in sig_chans:
ylabel.append(''.join([sig_name[ch], sig_units[ch]]))
else:
ylabel.append('ch_%d/NU' % ch)
ann_samp = compact_ann_samp
ann_sym = compact_ann_sym
# Signals encompass annotations
else:
ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
# Remove any empty middle channels from annotations
elif annotation:
ann_samp = [a for a in ann_samp if a.size]
if ann_sym is not None:
ann_sym = [a for a in ann_sym if a]
ylabel = ['ch_%d/NU' % ch for ch in ann_chans]
return signal, ann_samp, ann_sym, fs, ylabel, record_name | python | def get_wfdb_plot_items(record, annotation, plot_sym):
"""
Get items to plot from wfdb objects
"""
# Get record attributes
if record:
if record.p_signal is not None:
signal = record.p_signal
elif record.d_signal is not None:
signal = record.d_signal
else:
raise ValueError('The record has no signal to plot')
fs = record.fs
sig_name = record.sig_name
sig_units = record.units
record_name = 'Record: %s' % record.record_name
ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
else:
signal = fs = ylabel = record_name = None
# Get annotation attributes
if annotation:
# Get channels
ann_chans = set(annotation.chan)
n_ann_chans = max(ann_chans) + 1
# Indices for each channel
chan_inds = n_ann_chans * [np.empty(0, dtype='int')]
for chan in ann_chans:
chan_inds[chan] = np.where(annotation.chan == chan)[0]
ann_samp = [annotation.sample[ci] for ci in chan_inds]
if plot_sym:
ann_sym = n_ann_chans * [None]
for ch in ann_chans:
ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]]
else:
ann_sym = None
# Try to get fs from annotation if not already in record
if fs is None:
fs = annotation.fs
record_name = record_name or annotation.record_name
else:
ann_samp = None
ann_sym = None
# Cleaning: remove empty channels and set labels and styles.
# Wrangle together the signal and annotation channels if necessary
if record and annotation:
# There may be instances in which the annotation `chan`
# attribute has non-overlapping channels with the signal.
# In this case, omit empty middle channels. This function should
# already process labels and arrangements before passing into
# `plot_items`
sig_chans = set(range(signal.shape[1]))
all_chans = sorted(sig_chans.union(ann_chans))
# Need to update ylabels and annotation values
if sig_chans != all_chans:
compact_ann_samp = []
if plot_sym:
compact_ann_sym = []
else:
compact_ann_sym = None
ylabel = []
for ch in all_chans: # ie. 0, 1, 9
if ch in ann_chans:
compact_ann_samp.append(ann_samp[ch])
if plot_sym:
compact_ann_sym.append(ann_sym[ch])
if ch in sig_chans:
ylabel.append(''.join([sig_name[ch], sig_units[ch]]))
else:
ylabel.append('ch_%d/NU' % ch)
ann_samp = compact_ann_samp
ann_sym = compact_ann_sym
# Signals encompass annotations
else:
ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
# Remove any empty middle channels from annotations
elif annotation:
ann_samp = [a for a in ann_samp if a.size]
if ann_sym is not None:
ann_sym = [a for a in ann_sym if a]
ylabel = ['ch_%d/NU' % ch for ch in ann_chans]
return signal, ann_samp, ann_sym, fs, ylabel, record_name | [
"def",
"get_wfdb_plot_items",
"(",
"record",
",",
"annotation",
",",
"plot_sym",
")",
":",
"# Get record attributes",
"if",
"record",
":",
"if",
"record",
".",
"p_signal",
"is",
"not",
"None",
":",
"signal",
"=",
"record",
".",
"p_signal",
"elif",
"record",
... | Get items to plot from wfdb objects | [
"Get",
"items",
"to",
"plot",
"from",
"wfdb",
"objects"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L417-L510 | train | 216,188 |
MIT-LCP/wfdb-python | wfdb/io/download.py | _remote_file_size | def _remote_file_size(url=None, file_name=None, pb_dir=None):
"""
Get the remote file size in bytes
Parameters
----------
url : str, optional
The full url of the file. Use this option to explicitly
state the full url.
file_name : str, optional
The base file name. Use this argument along with pb_dir if you
want the full url to be constructed.
pb_dir : str, optional
The base file name. Use this argument along with file_name if
you want the full url to be constructed.
Returns
-------
remote_file_size : int
Size of the file in bytes
"""
# Option to construct the url
if file_name and pb_dir:
url = posixpath.join(config.db_index_url, pb_dir, file_name)
response = requests.head(url, headers={'Accept-Encoding': 'identity'})
# Raise HTTPError if invalid url
response.raise_for_status()
# Supposed size of the file
remote_file_size = int(response.headers['content-length'])
return remote_file_size | python | def _remote_file_size(url=None, file_name=None, pb_dir=None):
"""
Get the remote file size in bytes
Parameters
----------
url : str, optional
The full url of the file. Use this option to explicitly
state the full url.
file_name : str, optional
The base file name. Use this argument along with pb_dir if you
want the full url to be constructed.
pb_dir : str, optional
The base file name. Use this argument along with file_name if
you want the full url to be constructed.
Returns
-------
remote_file_size : int
Size of the file in bytes
"""
# Option to construct the url
if file_name and pb_dir:
url = posixpath.join(config.db_index_url, pb_dir, file_name)
response = requests.head(url, headers={'Accept-Encoding': 'identity'})
# Raise HTTPError if invalid url
response.raise_for_status()
# Supposed size of the file
remote_file_size = int(response.headers['content-length'])
return remote_file_size | [
"def",
"_remote_file_size",
"(",
"url",
"=",
"None",
",",
"file_name",
"=",
"None",
",",
"pb_dir",
"=",
"None",
")",
":",
"# Option to construct the url",
"if",
"file_name",
"and",
"pb_dir",
":",
"url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"d... | Get the remote file size in bytes
Parameters
----------
url : str, optional
The full url of the file. Use this option to explicitly
state the full url.
file_name : str, optional
The base file name. Use this argument along with pb_dir if you
want the full url to be constructed.
pb_dir : str, optional
The base file name. Use this argument along with file_name if
you want the full url to be constructed.
Returns
-------
remote_file_size : int
Size of the file in bytes | [
"Get",
"the",
"remote",
"file",
"size",
"in",
"bytes"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L35-L69 | train | 216,189 |
MIT-LCP/wfdb-python | wfdb/io/download.py | _stream_header | def _stream_header(file_name, pb_dir):
"""
Stream the lines of a remote header file.
Parameters
----------
file_name : str
pb_dir : str
The Physiobank database directory from which to find the
required header file. eg. For file '100.hea' in
'http://physionet.org/physiobank/database/mitdb', pb_dir='mitdb'.
"""
# Full url of header location
url = posixpath.join(config.db_index_url, pb_dir, file_name)
response = requests.get(url)
# Raise HTTPError if invalid url
response.raise_for_status()
# Get each line as a string
filelines = response.content.decode('iso-8859-1').splitlines()
# Separate content into header and comment lines
header_lines = []
comment_lines = []
for line in filelines:
line = str(line.strip())
# Comment line
if line.startswith('#'):
comment_lines.append(line)
# Non-empty non-comment line = header line.
elif line:
# Look for a comment in the line
ci = line.find('#')
if ci > 0:
header_lines.append(line[:ci])
# comment on same line as header line
comment_lines.append(line[ci:])
else:
header_lines.append(line)
return (header_lines, comment_lines) | python | def _stream_header(file_name, pb_dir):
"""
Stream the lines of a remote header file.
Parameters
----------
file_name : str
pb_dir : str
The Physiobank database directory from which to find the
required header file. eg. For file '100.hea' in
'http://physionet.org/physiobank/database/mitdb', pb_dir='mitdb'.
"""
# Full url of header location
url = posixpath.join(config.db_index_url, pb_dir, file_name)
response = requests.get(url)
# Raise HTTPError if invalid url
response.raise_for_status()
# Get each line as a string
filelines = response.content.decode('iso-8859-1').splitlines()
# Separate content into header and comment lines
header_lines = []
comment_lines = []
for line in filelines:
line = str(line.strip())
# Comment line
if line.startswith('#'):
comment_lines.append(line)
# Non-empty non-comment line = header line.
elif line:
# Look for a comment in the line
ci = line.find('#')
if ci > 0:
header_lines.append(line[:ci])
# comment on same line as header line
comment_lines.append(line[ci:])
else:
header_lines.append(line)
return (header_lines, comment_lines) | [
"def",
"_stream_header",
"(",
"file_name",
",",
"pb_dir",
")",
":",
"# Full url of header location",
"url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"pb_dir",
",",
"file_name",
")",
"response",
"=",
"requests",
".",
"get",
"(",
... | Stream the lines of a remote header file.
Parameters
----------
file_name : str
pb_dir : str
The Physiobank database directory from which to find the
required header file. eg. For file '100.hea' in
'http://physionet.org/physiobank/database/mitdb', pb_dir='mitdb'. | [
"Stream",
"the",
"lines",
"of",
"a",
"remote",
"header",
"file",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L71-L115 | train | 216,190 |
MIT-LCP/wfdb-python | wfdb/io/download.py | _stream_dat | def _stream_dat(file_name, pb_dir, byte_count, start_byte, dtype):
"""
Stream data from a remote dat file, into a 1d numpy array.
Parameters
----------
file_name : str
The name of the dat file to be read.
pb_dir : str
The physiobank directory where the dat file is located.
byte_count : int
The number of bytes to be read.
start_byte : int
The starting byte number to read from.
dtype : str
The numpy dtype to load the data into.
Returns
-------
sig_data : numpy array
The data read from the dat file.
"""
# Full url of dat file
url = posixpath.join(config.db_index_url, pb_dir, file_name)
# Specify the byte range
end_byte = start_byte + byte_count - 1
headers = {"Range":"bytes=%d-%d" % (start_byte, end_byte),
'Accept-Encoding': '*'}
# Get the content
response = requests.get(url, headers=headers, stream=True)
# Raise HTTPError if invalid url
response.raise_for_status()
# Convert to numpy array
sig_data = np.fromstring(response.content, dtype=dtype)
return sig_data | python | def _stream_dat(file_name, pb_dir, byte_count, start_byte, dtype):
"""
Stream data from a remote dat file, into a 1d numpy array.
Parameters
----------
file_name : str
The name of the dat file to be read.
pb_dir : str
The physiobank directory where the dat file is located.
byte_count : int
The number of bytes to be read.
start_byte : int
The starting byte number to read from.
dtype : str
The numpy dtype to load the data into.
Returns
-------
sig_data : numpy array
The data read from the dat file.
"""
# Full url of dat file
url = posixpath.join(config.db_index_url, pb_dir, file_name)
# Specify the byte range
end_byte = start_byte + byte_count - 1
headers = {"Range":"bytes=%d-%d" % (start_byte, end_byte),
'Accept-Encoding': '*'}
# Get the content
response = requests.get(url, headers=headers, stream=True)
# Raise HTTPError if invalid url
response.raise_for_status()
# Convert to numpy array
sig_data = np.fromstring(response.content, dtype=dtype)
return sig_data | [
"def",
"_stream_dat",
"(",
"file_name",
",",
"pb_dir",
",",
"byte_count",
",",
"start_byte",
",",
"dtype",
")",
":",
"# Full url of dat file",
"url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"pb_dir",
",",
"file_name",
")",
"# ... | Stream data from a remote dat file, into a 1d numpy array.
Parameters
----------
file_name : str
The name of the dat file to be read.
pb_dir : str
The physiobank directory where the dat file is located.
byte_count : int
The number of bytes to be read.
start_byte : int
The starting byte number to read from.
dtype : str
The numpy dtype to load the data into.
Returns
-------
sig_data : numpy array
The data read from the dat file. | [
"Stream",
"data",
"from",
"a",
"remote",
"dat",
"file",
"into",
"a",
"1d",
"numpy",
"array",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L118-L159 | train | 216,191 |
MIT-LCP/wfdb-python | wfdb/io/download.py | _stream_annotation | def _stream_annotation(file_name, pb_dir):
"""
Stream an entire remote annotation file from physiobank
Parameters
----------
file_name : str
The name of the annotation file to be read.
pb_dir : str
The physiobank directory where the annotation file is located.
"""
# Full url of annotation file
url = posixpath.join(config.db_index_url, pb_dir, file_name)
# Get the content
response = requests.get(url)
# Raise HTTPError if invalid url
response.raise_for_status()
# Convert to numpy array
ann_data = np.fromstring(response.content, dtype=np.dtype('<u1'))
return ann_data | python | def _stream_annotation(file_name, pb_dir):
"""
Stream an entire remote annotation file from physiobank
Parameters
----------
file_name : str
The name of the annotation file to be read.
pb_dir : str
The physiobank directory where the annotation file is located.
"""
# Full url of annotation file
url = posixpath.join(config.db_index_url, pb_dir, file_name)
# Get the content
response = requests.get(url)
# Raise HTTPError if invalid url
response.raise_for_status()
# Convert to numpy array
ann_data = np.fromstring(response.content, dtype=np.dtype('<u1'))
return ann_data | [
"def",
"_stream_annotation",
"(",
"file_name",
",",
"pb_dir",
")",
":",
"# Full url of annotation file",
"url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"pb_dir",
",",
"file_name",
")",
"# Get the content",
"response",
"=",
"requests... | Stream an entire remote annotation file from physiobank
Parameters
----------
file_name : str
The name of the annotation file to be read.
pb_dir : str
The physiobank directory where the annotation file is located. | [
"Stream",
"an",
"entire",
"remote",
"annotation",
"file",
"from",
"physiobank"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L162-L185 | train | 216,192 |
MIT-LCP/wfdb-python | wfdb/io/download.py | get_dbs | def get_dbs():
"""
Get a list of all the Physiobank databases available.
Examples
--------
>>> dbs = get_dbs()
"""
url = posixpath.join(config.db_index_url, 'DBS')
response = requests.get(url)
dbs = response.content.decode('ascii').splitlines()
dbs = [re.sub('\t{2,}', '\t', line).split('\t') for line in dbs]
return dbs | python | def get_dbs():
"""
Get a list of all the Physiobank databases available.
Examples
--------
>>> dbs = get_dbs()
"""
url = posixpath.join(config.db_index_url, 'DBS')
response = requests.get(url)
dbs = response.content.decode('ascii').splitlines()
dbs = [re.sub('\t{2,}', '\t', line).split('\t') for line in dbs]
return dbs | [
"def",
"get_dbs",
"(",
")",
":",
"url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"'DBS'",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"dbs",
"=",
"response",
".",
"content",
".",
"decode",
"(",
"'asci... | Get a list of all the Physiobank databases available.
Examples
--------
>>> dbs = get_dbs() | [
"Get",
"a",
"list",
"of",
"all",
"the",
"Physiobank",
"databases",
"available",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L188-L203 | train | 216,193 |
MIT-LCP/wfdb-python | wfdb/io/download.py | get_record_list | def get_record_list(db_dir, records='all'):
"""
Get a list of records belonging to a database.
Parameters
----------
db_dir : str
The database directory, usually the same as the database slug.
The location to look for a RECORDS file.
records : list, optional
A Option used when this function acts as a helper function.
Leave as default 'all' to get all records.
Examples
--------
>>> wfdb.get_record_list('mitdb')
"""
# Full url physiobank database
db_url = posixpath.join(config.db_index_url, db_dir)
# Check for a RECORDS file
if records == 'all':
response = requests.get(posixpath.join(db_url, 'RECORDS'))
if response.status_code == 404:
raise ValueError('The database %s has no WFDB files to download' % db_url)
# Get each line as a string
record_list = response.content.decode('ascii').splitlines()
# Otherwise the records are input manually
else:
record_list = records
return record_list | python | def get_record_list(db_dir, records='all'):
"""
Get a list of records belonging to a database.
Parameters
----------
db_dir : str
The database directory, usually the same as the database slug.
The location to look for a RECORDS file.
records : list, optional
A Option used when this function acts as a helper function.
Leave as default 'all' to get all records.
Examples
--------
>>> wfdb.get_record_list('mitdb')
"""
# Full url physiobank database
db_url = posixpath.join(config.db_index_url, db_dir)
# Check for a RECORDS file
if records == 'all':
response = requests.get(posixpath.join(db_url, 'RECORDS'))
if response.status_code == 404:
raise ValueError('The database %s has no WFDB files to download' % db_url)
# Get each line as a string
record_list = response.content.decode('ascii').splitlines()
# Otherwise the records are input manually
else:
record_list = records
return record_list | [
"def",
"get_record_list",
"(",
"db_dir",
",",
"records",
"=",
"'all'",
")",
":",
"# Full url physiobank database",
"db_url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"db_dir",
")",
"# Check for a RECORDS file",
"if",
"records",
"==",... | Get a list of records belonging to a database.
Parameters
----------
db_dir : str
The database directory, usually the same as the database slug.
The location to look for a RECORDS file.
records : list, optional
A Option used when this function acts as a helper function.
Leave as default 'all' to get all records.
Examples
--------
>>> wfdb.get_record_list('mitdb') | [
"Get",
"a",
"list",
"of",
"records",
"belonging",
"to",
"a",
"database",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L208-L241 | train | 216,194 |
MIT-LCP/wfdb-python | wfdb/io/download.py | make_local_dirs | def make_local_dirs(dl_dir, dl_inputs, keep_subdirs):
"""
Make any required local directories to prepare for downloading
"""
# Make the local download dir if it doesn't exist
if not os.path.isdir(dl_dir):
os.makedirs(dl_dir)
print('Created local base download directory: %s' % dl_dir)
# Create all required local subdirectories
# This must be out of dl_pb_file to
# avoid clash in multiprocessing
if keep_subdirs:
dl_dirs = set([os.path.join(dl_dir, d[1]) for d in dl_inputs])
for d in dl_dirs:
if not os.path.isdir(d):
os.makedirs(d)
return | python | def make_local_dirs(dl_dir, dl_inputs, keep_subdirs):
"""
Make any required local directories to prepare for downloading
"""
# Make the local download dir if it doesn't exist
if not os.path.isdir(dl_dir):
os.makedirs(dl_dir)
print('Created local base download directory: %s' % dl_dir)
# Create all required local subdirectories
# This must be out of dl_pb_file to
# avoid clash in multiprocessing
if keep_subdirs:
dl_dirs = set([os.path.join(dl_dir, d[1]) for d in dl_inputs])
for d in dl_dirs:
if not os.path.isdir(d):
os.makedirs(d)
return | [
"def",
"make_local_dirs",
"(",
"dl_dir",
",",
"dl_inputs",
",",
"keep_subdirs",
")",
":",
"# Make the local download dir if it doesn't exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dl_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"dl_dir",
")",
"pr... | Make any required local directories to prepare for downloading | [
"Make",
"any",
"required",
"local",
"directories",
"to",
"prepare",
"for",
"downloading"
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L277-L294 | train | 216,195 |
MIT-LCP/wfdb-python | wfdb/io/download.py | dl_pb_file | def dl_pb_file(inputs):
"""
Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because python2 doesn't have starmap...
"""
basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs
# Full url of file
url = posixpath.join(config.db_index_url, db, subdir, basefile)
# Supposed size of the file
remote_file_size = _remote_file_size(url)
# Figure out where the file should be locally
if keep_subdirs:
dldir = os.path.join(dl_dir, subdir)
else:
dldir = dl_dir
local_file = os.path.join(dldir, basefile)
# The file exists locally.
if os.path.isfile(local_file):
# Redownload regardless
if overwrite:
dl_full_file(url, local_file)
# Process accordingly.
else:
local_file_size = os.path.getsize(local_file)
# Local file is smaller than it should be. Append it.
if local_file_size < remote_file_size:
print('Detected partially downloaded file: %s Appending file...' % local_file)
headers = {"Range": "bytes="+str(local_file_size)+"-", 'Accept-Encoding': '*'}
r = requests.get(url, headers=headers, stream=True)
print('headers: ', headers)
print('r content length: ', len(r.content))
with open(local_file, 'ba') as writefile:
writefile.write(r.content)
print('Done appending.')
# Local file is larger than it should be. Redownload.
elif local_file_size > remote_file_size:
dl_full_file(url, local_file)
# If they're the same size, do nothing.
# The file doesn't exist. Download it.
else:
dl_full_file(url, local_file)
return | python | def dl_pb_file(inputs):
"""
Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because python2 doesn't have starmap...
"""
basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs
# Full url of file
url = posixpath.join(config.db_index_url, db, subdir, basefile)
# Supposed size of the file
remote_file_size = _remote_file_size(url)
# Figure out where the file should be locally
if keep_subdirs:
dldir = os.path.join(dl_dir, subdir)
else:
dldir = dl_dir
local_file = os.path.join(dldir, basefile)
# The file exists locally.
if os.path.isfile(local_file):
# Redownload regardless
if overwrite:
dl_full_file(url, local_file)
# Process accordingly.
else:
local_file_size = os.path.getsize(local_file)
# Local file is smaller than it should be. Append it.
if local_file_size < remote_file_size:
print('Detected partially downloaded file: %s Appending file...' % local_file)
headers = {"Range": "bytes="+str(local_file_size)+"-", 'Accept-Encoding': '*'}
r = requests.get(url, headers=headers, stream=True)
print('headers: ', headers)
print('r content length: ', len(r.content))
with open(local_file, 'ba') as writefile:
writefile.write(r.content)
print('Done appending.')
# Local file is larger than it should be. Redownload.
elif local_file_size > remote_file_size:
dl_full_file(url, local_file)
# If they're the same size, do nothing.
# The file doesn't exist. Download it.
else:
dl_full_file(url, local_file)
return | [
"def",
"dl_pb_file",
"(",
"inputs",
")",
":",
"basefile",
",",
"subdir",
",",
"db",
",",
"dl_dir",
",",
"keep_subdirs",
",",
"overwrite",
"=",
"inputs",
"# Full url of file",
"url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"d... | Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because python2 doesn't have starmap... | [
"Download",
"a",
"file",
"from",
"physiobank",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L297-L349 | train | 216,196 |
MIT-LCP/wfdb-python | wfdb/io/download.py | dl_full_file | def dl_full_file(url, save_file_name):
"""
Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as
"""
response = requests.get(url)
with open(save_file_name, 'wb') as writefile:
writefile.write(response.content)
return | python | def dl_full_file(url, save_file_name):
"""
Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as
"""
response = requests.get(url)
with open(save_file_name, 'wb') as writefile:
writefile.write(response.content)
return | [
"def",
"dl_full_file",
"(",
"url",
",",
"save_file_name",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"with",
"open",
"(",
"save_file_name",
",",
"'wb'",
")",
"as",
"writefile",
":",
"writefile",
".",
"write",
"(",
"response",
"."... | Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as | [
"Download",
"a",
"file",
".",
"No",
"checks",
"are",
"performed",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L352-L368 | train | 216,197 |
MIT-LCP/wfdb-python | wfdb/io/download.py | dl_files | def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False):
"""
Download specified files from a Physiobank database.
Parameters
----------
db : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
files : list
A list of strings specifying the file names to download relative to the
database base directory.
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files as they
are organized in Physiobank (True), or to download all files into the
same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False, existing
files with the same name and relative subdirectory will be checked.
If the local file is the same size as the online file, the download is
skipped. If the local file is larger, it will be deleted and the file
will be redownloaded. If the local file is smaller, the file will be
assumed to be partially downloaded and the remaining bytes will be
downloaded and appended.
Examples
--------
>>> wfdb.dl_files('ahadb', os.getcwd(),
['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea',
'data/001a.dat'])
"""
# Full url physiobank database
db_url = posixpath.join(config.db_index_url, db)
# Check if the database is valid
response = requests.get(db_url)
response.raise_for_status()
# Construct the urls to download
dl_inputs = [(os.path.split(file)[1], os.path.split(file)[0], db, dl_dir, keep_subdirs, overwrite) for file in files]
# Make any required local directories
make_local_dirs(dl_dir, dl_inputs, keep_subdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(dl_pb_file, dl_inputs)
print('Finished downloading files')
return | python | def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False):
"""
Download specified files from a Physiobank database.
Parameters
----------
db : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
files : list
A list of strings specifying the file names to download relative to the
database base directory.
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files as they
are organized in Physiobank (True), or to download all files into the
same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False, existing
files with the same name and relative subdirectory will be checked.
If the local file is the same size as the online file, the download is
skipped. If the local file is larger, it will be deleted and the file
will be redownloaded. If the local file is smaller, the file will be
assumed to be partially downloaded and the remaining bytes will be
downloaded and appended.
Examples
--------
>>> wfdb.dl_files('ahadb', os.getcwd(),
['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea',
'data/001a.dat'])
"""
# Full url physiobank database
db_url = posixpath.join(config.db_index_url, db)
# Check if the database is valid
response = requests.get(db_url)
response.raise_for_status()
# Construct the urls to download
dl_inputs = [(os.path.split(file)[1], os.path.split(file)[0], db, dl_dir, keep_subdirs, overwrite) for file in files]
# Make any required local directories
make_local_dirs(dl_dir, dl_inputs, keep_subdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(dl_pb_file, dl_inputs)
print('Finished downloading files')
return | [
"def",
"dl_files",
"(",
"db",
",",
"dl_dir",
",",
"files",
",",
"keep_subdirs",
"=",
"True",
",",
"overwrite",
"=",
"False",
")",
":",
"# Full url physiobank database",
"db_url",
"=",
"posixpath",
".",
"join",
"(",
"config",
".",
"db_index_url",
",",
"db",
... | Download specified files from a Physiobank database.
Parameters
----------
db : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
files : list
A list of strings specifying the file names to download relative to the
database base directory.
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files as they
are organized in Physiobank (True), or to download all files into the
same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False, existing
files with the same name and relative subdirectory will be checked.
If the local file is the same size as the online file, the download is
skipped. If the local file is larger, it will be deleted and the file
will be redownloaded. If the local file is smaller, the file will be
assumed to be partially downloaded and the remaining bytes will be
downloaded and appended.
Examples
--------
>>> wfdb.dl_files('ahadb', os.getcwd(),
['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea',
'data/001a.dat']) | [
"Download",
"specified",
"files",
"from",
"a",
"Physiobank",
"database",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L371-L425 | train | 216,198 |
MIT-LCP/wfdb-python | wfdb/io/annotation.py | label_triplets_to_df | def label_triplets_to_df(triplets):
"""
Get a pd dataframe from a tuple triplets
used to define annotation labels.
The triplets should come in the
form: (label_store, symbol, description)
"""
label_df = pd.DataFrame({'label_store':np.array([t[0] for t in triplets],
dtype='int'),
'symbol':[t[1] for t in triplets],
'description':[t[2] for t in triplets]})
label_df.set_index(label_df['label_store'].values, inplace=True)
label_df = label_df[list(ann_label_fields)]
return label_df | python | def label_triplets_to_df(triplets):
"""
Get a pd dataframe from a tuple triplets
used to define annotation labels.
The triplets should come in the
form: (label_store, symbol, description)
"""
label_df = pd.DataFrame({'label_store':np.array([t[0] for t in triplets],
dtype='int'),
'symbol':[t[1] for t in triplets],
'description':[t[2] for t in triplets]})
label_df.set_index(label_df['label_store'].values, inplace=True)
label_df = label_df[list(ann_label_fields)]
return label_df | [
"def",
"label_triplets_to_df",
"(",
"triplets",
")",
":",
"label_df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'label_store'",
":",
"np",
".",
"array",
"(",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"triplets",
"]",
",",
"dtype",
"=",
"'int'",
")",
... | Get a pd dataframe from a tuple triplets
used to define annotation labels.
The triplets should come in the
form: (label_store, symbol, description) | [
"Get",
"a",
"pd",
"dataframe",
"from",
"a",
"tuple",
"triplets",
"used",
"to",
"define",
"annotation",
"labels",
"."
] | cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L925-L942 | train | 216,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.